Dataset columns:
index: int64 (0 to 0)
repo_id: string (length 48 to 65)
file_path: string (length 62 to 122)
content: string (length 27 to 3.15M)
__index_level_0__: int64 (0 to 10k)
index: 0
repo_id: hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
file_path: hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/marian.mdx-hf-doc-builder.js
content:
import{S as _m,i as gm,s as vm,e as o,k as l,w as M,t as r,M as km,c as s,d as t,m as c,a,x as b,h as i,b as p,G as e,g as f,y,q as x,o as w,B as $,v as Tm,L as Vt}from"../../chunks/vendor-hf-doc-builder.js";import{T as Dn}from"../../chunks/Tip-hf-doc-builder.js";import{D as Z}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Fe}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as fe}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Rt}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Mm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianModel, MarianConfig # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration configuration = MarianConfig() # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration model = MarianModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianModel, MarianConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = MarianConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),{c(){h=o("p"),k=r("Examples:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Examples:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function bm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") src_texts = ["I am a small frog.", "Tom asked his teacher for advice."] tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True) outputs = model(**inputs) # should work`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>src_texts = [<span class="hljs-string">&quot;I am a small frog.&quot;</span>, <span class="hljs-string">&quot;Tom asked his teacher for advice.&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_texts = [<span class="hljs-string">&quot;Ich bin ein kleiner Frosch.&quot;</span>, <span class="hljs-string">&quot;Tom bat seinen Lehrer um Rat.&quot;</span>] <span class="hljs-comment"># optional</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-comment"># keys [input_ids, attention_mask, 
labels].</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-comment"># should work</span>`}}),{c(){h=o("p"),k=r("Examples:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Examples:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function ym(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function xm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, MarianModel tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") decoder_inputs = tokenizer( "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen", return_tensors="pt", add_special_tokens=False, ) outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) last_hidden_states = outputs.last_hidden_state list(last_hidden_states.shape)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianModel.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_inputs = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&lt;pad&gt; Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen&quot;</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> add_special_tokens=<span class="hljs-literal">False</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(last_hidden_states.shape) [<span class="hljs-number">1</span>, <span class="hljs-number">26</span>, <span class="hljs-number">512</span>]`}}),{c(){h=o("p"),k=r("Example:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Example:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function wm(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function $m(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, MarianMTModel src = "fr" # source language trg = "en" # target language model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" model = MarianMTModel.from_pretrained(model_name) tokenizer = MarianTokenizer.from_pretrained(model_name) sample_text = "o\xF9 est l'arr\xEAt de bus ?" 
batch = tokenizer([sample_text], return_tensors="pt") generated_ids = model.generate(**batch) tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span>src = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-comment"># source language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>trg = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-comment"># target language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">f&quot;Helsinki-NLP/opus-mt-<span class="hljs-subst">{src}</span>-<span class="hljs-subst">{trg}</span>&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>sample_text = <span class="hljs-string">&quot;o\xF9 est l&#x27;arr\xEAt de bus ?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([sample_text], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(**batch) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_ids, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-string">&quot;Where&#x27;s the bus stop?&quot;</span>`}}),{c(){h=o("p"),k=r("Examples:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Examples:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function zm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, MarianForCausalLM tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en") model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False) assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) logits = outputs.logits expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] list(logits.shape) == expected_shape`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, MarianForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-fr-en&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianForCausalLM.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-fr-en&quot;</span>, add_cross_attention=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.is_decoder, <span class="hljs-string">f&quot;<span class="hljs-subst">{model.__class__}</span> has to be configured as a decoder.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits <span class="hljs-meta">&gt;&gt;&gt; </span>expected_shape = [<span class="hljs-number">1</span>, inputs.input_ids.shape[-<span class="hljs-number">1</span>], model.config.vocab_size] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">list</span>(logits.shape) == expected_shape <span class="hljs-literal">True</span>`}}),{c(){h=o("p"),k=r("Example:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Example:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function jm(E){let h,k,g,_,v,d,u,q,Ve,Ne,C,ve,ke,z,Q,ee,Ke,Ce,R,Ge,Le,L,X,Te,H,Je,Me,te,Ie,F,ne,N,I,Ye,V,oe,Ze,U,Xe,Qe,A,be,O,et,se,ae,tt,re,K,nt,ie,ye,de,S,ot,D,xe,st;return{c(){h=o("p"),k=r("TensorFlow models and layers in "),g=o("code"),_=r("transformers"),v=r(" accept two formats as input:"),d=l(),u=o("ul"),q=o("li"),Ve=r("having all inputs as keyword arguments (like PyTorch models), or"),Ne=l(),C=o("li"),ve=r("having all inputs as a list, tuple or dict in the first positional argument."),ke=l(),z=o("p"),Q=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),ee=o("code"),Ke=r("model.fit()"),Ce=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),R=o("code"),Ge=r("model.fit()"),Le=r(` supports! 
If, however, you want to use the second format outside of Keras methods like `),L=o("code"),X=r("fit()"),Te=r(" and "),H=o("code"),Je=r("predict()"),Me=r(`, such as when creating your own layers or models with the Keras `),te=o("code"),Ie=r("Functional"),F=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),ne=l(),N=o("ul"),I=o("li"),Ye=r("a single Tensor with "),V=o("code"),oe=r("input_ids"),Ze=r(" only and nothing else: "),U=o("code"),Xe=r("model(input_ids)"),Qe=l(),A=o("li"),be=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=o("code"),et=r("model([input_ids, attention_mask])"),se=r(" or "),ae=o("code"),tt=r("model([input_ids, attention_mask, token_type_ids])"),re=l(),K=o("li"),nt=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ie=o("code"),ye=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),de=l(),S=o("p"),ot=r(`Note that when creating models and layers with `),D=o("a"),xe=r("subclassing"),st=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(T){h=s(T,"P",{});var j=a(h);k=i(j,"TensorFlow models and layers in "),g=s(j,"CODE",{});var we=a(g);_=i(we,"transformers"),we.forEach(t),v=i(j," accept two formats as input:"),j.forEach(t),d=c(T),u=s(T,"UL",{});var $e=a(u);q=s($e,"LI",{});var Ae=a(q);Ve=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),Ne=c($e),C=s($e,"LI",{});var ft=a(C);ve=i(ft,"having all inputs as a list, tuple or dict in the first positional argument."),ft.forEach(t),$e.forEach(t),ke=c(T),z=s(T,"P",{});var P=a(z);Q=i(P,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),ee=s(P,"CODE",{});var _t=a(ee);Ke=i(_t,"model.fit()"),_t.forEach(t),Ce=i(P,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),R=s(P,"CODE",{});var le=a(R);Ge=i(le,"model.fit()"),le.forEach(t),Le=i(P,` supports! 
If, however, you want to use the second format outside of Keras methods like `),L=s(P,"CODE",{});var ze=a(L);X=i(ze,"fit()"),ze.forEach(t),Te=i(P," and "),H=s(P,"CODE",{});var gt=a(H);Je=i(gt,"predict()"),gt.forEach(t),Me=i(P,`, such as when creating your own layers or models with the Keras `),te=s(P,"CODE",{});var at=a(te);Ie=i(at,"Functional"),at.forEach(t),F=i(P,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),P.forEach(t),ne=c(T),N=s(T,"UL",{});var G=a(N);I=s(G,"LI",{});var ce=a(I);Ye=i(ce,"a single Tensor with "),V=s(ce,"CODE",{});var vt=a(V);oe=i(vt,"input_ids"),vt.forEach(t),Ze=i(ce," only and nothing else: "),U=s(ce,"CODE",{});var je=a(U);Xe=i(je,"model(input_ids)"),je.forEach(t),ce.forEach(t),Qe=c(G),A=s(G,"LI",{});var pe=a(A);be=i(pe,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=s(pe,"CODE",{});var rt=a(O);et=i(rt,"model([input_ids, attention_mask])"),rt.forEach(t),se=i(pe," or "),ae=s(pe,"CODE",{});var kt=a(ae);tt=i(kt,"model([input_ids, attention_mask, token_type_ids])"),kt.forEach(t),pe.forEach(t),re=c(G),K=s(G,"LI",{});var it=a(K);nt=i(it,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ie=s(it,"CODE",{});var Tt=a(ie);ye=i(Tt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Tt.forEach(t),it.forEach(t),G.forEach(t),de=c(T),S=s(T,"P",{});var B=a(S);ot=i(B,`Note that when creating models and layers with `),D=s(B,"A",{href:!0,rel:!0});var Mt=a(D);xe=i(Mt,"subclassing"),Mt.forEach(t),st=i(B,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),B.forEach(t),this.h()},h(){p(D,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),p(D,"rel","nofollow")},m(T,j){f(T,h,j),e(h,k),e(h,g),e(g,_),e(h,v),f(T,d,j),f(T,u,j),e(u,q),e(q,Ve),e(u,Ne),e(u,C),e(C,ve),f(T,ke,j),f(T,z,j),e(z,Q),e(z,ee),e(ee,Ke),e(z,Ce),e(z,R),e(R,Ge),e(z,Le),e(z,L),e(L,X),e(z,Te),e(z,H),e(H,Je),e(z,Me),e(z,te),e(te,Ie),e(z,F),f(T,ne,j),f(T,N,j),e(N,I),e(I,Ye),e(I,V),e(V,oe),e(I,Ze),e(I,U),e(U,Xe),e(N,Qe),e(N,A),e(A,be),e(A,O),e(O,et),e(A,se),e(A,ae),e(ae,tt),e(N,re),e(N,K),e(K,nt),e(K,ie),e(ie,ye),f(T,de,j),f(T,S,j),e(S,ot),e(S,D),e(D,xe),e(S,st)},d(T){T&&t(h),T&&t(d),T&&t(u),T&&t(ke),T&&t(z),T&&t(ne),T&&t(N),T&&t(de),T&&t(S)}}}function Em(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function qm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, TFMarianModel import tensorflow as tf tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = TFMarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") 
outputs = model(inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, TFMarianModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMarianModel.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){h=o("p"),k=r("Example:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Example:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function Pm(E){let h,k,g,_,v,d,u,q,Ve,Ne,C,ve,ke,z,Q,ee,Ke,Ce,R,Ge,Le,L,X,Te,H,Je,Me,te,Ie,F,ne,N,I,Ye,V,oe,Ze,U,Xe,Qe,A,be,O,et,se,ae,tt,re,K,nt,ie,ye,de,S,ot,D,xe,st;return{c(){h=o("p"),k=r("TensorFlow models and layers in "),g=o("code"),_=r("transformers"),v=r(" accept two formats as input:"),d=l(),u=o("ul"),q=o("li"),Ve=r("having all inputs as keyword arguments (like PyTorch models), or"),Ne=l(),C=o("li"),ve=r("having all inputs as a list, tuple or dict in the first positional argument."),ke=l(),z=o("p"),Q=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),ee=o("code"),Ke=r("model.fit()"),Ce=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),R=o("code"),Ge=r("model.fit()"),Le=r(` supports! 
If, however, you want to use the second format outside of Keras methods like `),L=o("code"),X=r("fit()"),Te=r(" and "),H=o("code"),Je=r("predict()"),Me=r(`, such as when creating your own layers or models with the Keras `),te=o("code"),Ie=r("Functional"),F=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),ne=l(),N=o("ul"),I=o("li"),Ye=r("a single Tensor with "),V=o("code"),oe=r("input_ids"),Ze=r(" only and nothing else: "),U=o("code"),Xe=r("model(input_ids)"),Qe=l(),A=o("li"),be=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=o("code"),et=r("model([input_ids, attention_mask])"),se=r(" or "),ae=o("code"),tt=r("model([input_ids, attention_mask, token_type_ids])"),re=l(),K=o("li"),nt=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ie=o("code"),ye=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),de=l(),S=o("p"),ot=r(`Note that when creating models and layers with `),D=o("a"),xe=r("subclassing"),st=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(T){h=s(T,"P",{});var j=a(h);k=i(j,"TensorFlow models and layers in "),g=s(j,"CODE",{});var we=a(g);_=i(we,"transformers"),we.forEach(t),v=i(j," accept two formats as input:"),j.forEach(t),d=c(T),u=s(T,"UL",{});var $e=a(u);q=s($e,"LI",{});var Ae=a(q);Ve=i(Ae,"having all inputs as keyword arguments (like PyTorch models), or"),Ae.forEach(t),Ne=c($e),C=s($e,"LI",{});var ft=a(C);ve=i(ft,"having all inputs as a list, tuple or dict in the first positional argument."),ft.forEach(t),$e.forEach(t),ke=c(T),z=s(T,"P",{});var P=a(z);Q=i(P,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),ee=s(P,"CODE",{});var _t=a(ee);Ke=i(_t,"model.fit()"),_t.forEach(t),Ce=i(P,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),R=s(P,"CODE",{});var le=a(R);Ge=i(le,"model.fit()"),le.forEach(t),Le=i(P,` supports! 
If, however, you want to use the second format outside of Keras methods like `),L=s(P,"CODE",{});var ze=a(L);X=i(ze,"fit()"),ze.forEach(t),Te=i(P," and "),H=s(P,"CODE",{});var gt=a(H);Je=i(gt,"predict()"),gt.forEach(t),Me=i(P,`, such as when creating your own layers or models with the Keras `),te=s(P,"CODE",{});var at=a(te);Ie=i(at,"Functional"),at.forEach(t),F=i(P,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),P.forEach(t),ne=c(T),N=s(T,"UL",{});var G=a(N);I=s(G,"LI",{});var ce=a(I);Ye=i(ce,"a single Tensor with "),V=s(ce,"CODE",{});var vt=a(V);oe=i(vt,"input_ids"),vt.forEach(t),Ze=i(ce," only and nothing else: "),U=s(ce,"CODE",{});var je=a(U);Xe=i(je,"model(input_ids)"),je.forEach(t),ce.forEach(t),Qe=c(G),A=s(G,"LI",{});var pe=a(A);be=i(pe,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),O=s(pe,"CODE",{});var rt=a(O);et=i(rt,"model([input_ids, attention_mask])"),rt.forEach(t),se=i(pe," or "),ae=s(pe,"CODE",{});var kt=a(ae);tt=i(kt,"model([input_ids, attention_mask, token_type_ids])"),kt.forEach(t),pe.forEach(t),re=c(G),K=s(G,"LI",{});var it=a(K);nt=i(it,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ie=s(it,"CODE",{});var Tt=a(ie);ye=i(Tt,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),Tt.forEach(t),it.forEach(t),G.forEach(t),de=c(T),S=s(T,"P",{});var B=a(S);ot=i(B,`Note that when creating models and layers with `),D=s(B,"A",{href:!0,rel:!0});var Mt=a(D);xe=i(Mt,"subclassing"),Mt.forEach(t),st=i(B,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),B.forEach(t),this.h()},h(){p(D,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),p(D,"rel","nofollow")},m(T,j){f(T,h,j),e(h,k),e(h,g),e(g,_),e(h,v),f(T,d,j),f(T,u,j),e(u,q),e(q,Ve),e(u,Ne),e(u,C),e(C,ve),f(T,ke,j),f(T,z,j),e(z,Q),e(z,ee),e(ee,Ke),e(z,Ce),e(z,R),e(R,Ge),e(z,Le),e(z,L),e(L,X),e(z,Te),e(z,H),e(H,Je),e(z,Me),e(z,te),e(te,Ie),e(z,F),f(T,ne,j),f(T,N,j),e(N,I),e(I,Ye),e(I,V),e(V,oe),e(I,Ze),e(I,U),e(U,Xe),e(N,Qe),e(N,A),e(A,be),e(A,O),e(O,et),e(A,se),e(A,ae),e(ae,tt),e(N,re),e(N,K),e(K,nt),e(K,ie),e(ie,ye),f(T,de,j),f(T,S,j),e(S,ot),e(S,D),e(D,xe),e(S,st)},d(T){T&&t(h),T&&t(d),T&&t(u),T&&t(ke),T&&t(z),T&&t(ne),T&&t(N),T&&t(de),T&&t(S)}}}function Fm(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function Nm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, TFMarianMTModel from typing import List src = "fr" # source language trg = "en" # target language sample_text = "o\xF9 est l'arr\xEAt de bus ?" 
model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" model = TFMarianMTModel.from_pretrained(model_name) tokenizer = MarianTokenizer.from_pretrained(model_name) batch = tokenizer([sample_text], return_tensors="tf") gen = model.generate(**batch) tokenizer.batch_decode(gen, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, TFMarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">List</span> <span class="hljs-meta">&gt;&gt;&gt; </span>src = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-comment"># source language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>trg = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-comment"># target language</span> <span class="hljs-meta">&gt;&gt;&gt; </span>sample_text = <span class="hljs-string">&quot;o\xF9 est l&#x27;arr\xEAt de bus ?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">f&quot;Helsinki-NLP/opus-mt-<span class="hljs-subst">{src}</span>-<span class="hljs-subst">{trg}</span>&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFMarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>batch = tokenizer([sample_text], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>gen = model.generate(**batch) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(gen, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Where is the bus stop ?&quot;</span>`}}),{c(){h=o("p"),k=r("Examples:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Examples:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function Cm(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function Lm(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, FlaxMarianModel tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = FlaxMarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, FlaxMarianModel <span 
class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMarianModel.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){h=o("p"),k=r("Example:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Example:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function Im(E){let h,k,g,_,v;return{c(){h=o("p"),k=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),g=o("code"),_=r("Module"),v=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),g=s(u,"CODE",{});var q=a(g);_=i(q,"Module"),q.forEach(t),v=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(d,u){f(d,h,u),e(h,k),e(h,g),e(g,_),e(h,v)},d(d){d&&t(h)}}}function Am(E){let h,k,g,_,v;return _=new Fe({props:{code:`from transformers import MarianTokenizer, FlaxMarianMTModel model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") text = "My friends are cool but they eat too many carbs." 
input_ids = tokenizer(text, max_length=64, return_tensors="jax").input_ids sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True) # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianTokenizer, FlaxMarianMTModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxMarianMTModel.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(text, max_length=<span class="hljs-number">64</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>sequences = model.generate(input_ids, max_length=<span class="hljs-number">64</span>, num_beams=<span class="hljs-number">2</span>).sequences <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = tokenizer.batch_decode(sequences, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*</span>`}}),{c(){h=o("p"),k=r("Example:"),g=l(),M(_.$$.fragment)},l(d){h=s(d,"P",{});var u=a(h);k=i(u,"Example:"),u.forEach(t),g=c(d),b(_.$$.fragment,d)},m(d,u){f(d,h,u),e(h,k),f(d,g,u),y(_,d,u),v=!0},p:Vt,i(d){v||(x(_.$$.fragment,d),v=!0)},o(d){w(_.$$.fragment,d),v=!1},d(d){d&&t(h),d&&t(g),$(_,d)}}}function Om(E){let 
h,k,g,_,v,d,u,q,Ve,Ne,C,ve,ke,z,Q,ee,Ke,Ce,R,Ge,Le,L,X,Te,H,Je,Me,te,Ie,F,ne,N,I,Ye,V,oe,Ze,U,Xe,Qe,A,be,O,et,se,ae,tt,re,K,nt,ie,ye,de,S,ot,D,xe,st,T,j,we,$e,Ae,ft,P,_t,le,ze,gt,at,G,ce,vt,je,pe,rt,kt,it,Tt,B,Mt,Ks,Li,Ii,Gs,Ai,Oi,Si,Js,Hn,Di,Ys,Hi,Ui,Bi,Zs,Un,Wi,Bn,Ri,Vi,cr,bt,Kt,Xs,Wn,Ki,Qs,Gi,pr,Oe,gs,Ji,ea,Yi,Zi,Rn,Xi,Vn,Qi,ed,td,yt,nd,ta,od,sd,na,ad,rd,id,oa,dd,hr,xt,Gt,sa,Kn,ld,aa,cd,ur,Jt,ra,pd,hd,ia,Gn,ud,mr,wt,Yt,da,Jn,md,la,fd,fr,Se,Yn,_d,ca,gd,vd,kd,Zn,Td,pa,Md,bd,yd,Xn,xd,Qn,wd,$d,zd,eo,jd,ha,Ed,qd,_r,Zt,Pd,to,Fd,Nd,gr,no,vr,vs,Cd,kr,oo,Tr,$t,Xt,ua,so,Ld,ma,Id,Mr,ks,Ad,br,ao,yr,Ts,Od,xr,ro,wr,zt,Qt,fa,io,Sd,_a,Dd,$r,Ee,lo,Hd,jt,Ud,Ms,Bd,Wd,co,Rd,Vd,Kd,Et,Gd,bs,Jd,Yd,ys,Zd,Xd,Qd,en,zr,qt,tn,ga,po,el,va,tl,jr,he,ho,nl,uo,ol,mo,sl,al,rl,fo,il,xs,dl,ll,cl,nn,pl,on,_o,hl,ka,ul,Er,Pt,sn,Ta,go,ml,Ma,fl,qr,qe,vo,_l,ko,gl,ws,vl,kl,Tl,To,Ml,Mo,bl,yl,xl,De,bo,wl,Ft,$l,$s,zl,jl,ba,El,ql,Pl,an,Fl,rn,Pr,Nt,dn,ya,yo,Nl,xa,Cl,Fr,Pe,xo,Ll,wo,Il,zs,Al,Ol,Sl,$o,Dl,zo,Hl,Ul,Bl,_e,jo,Wl,Ct,Rl,js,Vl,Kl,wa,Gl,Jl,Yl,ln,Zl,Eo,Xl,qo,Ql,ec,tc,cn,Nr,Lt,pn,$a,Po,nc,za,oc,Cr,It,Fo,sc,hn,No,ac,un,Lr,At,mn,ja,Co,rc,Ea,ic,Ir,ue,Lo,dc,Io,lc,Es,cc,pc,hc,Ao,uc,Oo,mc,fc,_c,fn,gc,He,So,vc,Ot,kc,qs,Tc,Mc,qa,bc,yc,xc,_n,wc,gn,Ar,St,vn,Pa,Do,$c,Fa,zc,Or,me,Ho,jc,Uo,Ec,Ps,qc,Pc,Fc,Bo,Nc,Wo,Cc,Lc,Ic,kn,Ac,ge,Ro,Oc,Dt,Sc,Fs,Dc,Hc,Na,Uc,Bc,Wc,Tn,Rc,Vo,Vc,Ko,Kc,Gc,Jc,Mn,Sr,Ht,bn,Ca,Go,Yc,La,Zc,Dr,J,Jo,Xc,Yo,Qc,Ns,ep,tp,np,Zo,op,Xo,sp,ap,rp,Ia,ip,dp,dt,Aa,Qo,lp,cp,Oa,es,pp,hp,Sa,ts,up,mp,Da,ns,fp,_p,Ue,os,gp,Ut,vp,Ha,kp,Tp,Ua,Mp,bp,yp,yn,xp,xn,Hr,Bt,wn,Ba,ss,wp,Wa,$p,Ur,Y,as,zp,rs,jp,Cs,Ep,qp,Pp,is,Fp,ds,Np,Cp,Lp,Ra,Ip,Ap,lt,Va,ls,Op,Sp,Ka,cs,Dp,Hp,Ga,ps,Up,Bp,Ja,hs,Wp,Rp,Be,us,Vp,Wt,Kp,Ya,Gp,Jp,Za,Yp,Zp,Xp,$n,Qp,zn,Br;return d=new fe({}),H=new fe({}),Wn=new fe({}),Kn=new fe({}),Jn=new fe({}),no=new Fe({props:{code:`from transformers import MarianMTModel, MarianTokenizer src_text = [ ">>fra<< this is a sentence in english that we want to translate to french", ">>por<< This should go to portuguese", ">>esp<< And this to Spanish", ] model_name = "Helsinki-NLP/opus-mt-en-roa" tokenizer = MarianTokenizer.from_pretrained(model_name) print(tokenizer.supported_language_codes) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) [tokenizer.decode(t, skip_special_tokens=True) for t in translated]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianMTModel, MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;fra&lt;&lt; this is a sentence in english that we want to translate to french&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;por&lt;&lt; This should go to portuguese&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;esp&lt;&lt; And this to Spanish&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-roa&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.supported_language_codes) [<span class="hljs-string">&#x27;&gt;&gt;zlm_Latn&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;mfe&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;hat&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;pap&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ast&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;cat&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ind&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;glg&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;wln&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;spa&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;fra&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ron&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;por&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;ita&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;oci&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;arg&lt;&lt;&#x27;</span>, <span class="hljs-string">&#x27;&gt;&gt;min&lt;&lt;&#x27;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>translated = model.generate(**tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>[tokenizer.decode(t, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> t <span class="hljs-keyword">in</span> translated] [<span class="hljs-string">&quot;c&#x27;est une phrase en anglais que nous voulons traduire en fran\xE7ais&quot;</span>, <span class="hljs-string">&#x27;Isto deve ir para o portugu\xEAs.&#x27;</span>, <span class="hljs-string">&#x27;Y esto al espa\xF1ol&#x27;</span>]`}}),oo=new Fe({props:{code:`from huggingface_hub import list_models model_list = list_models() org = "Helsinki-NLP" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] suffix = [x.split("/")[1] for x in model_ids] old_style_multi_models = [f"{org}/{s}" for s in suffix if s != s.lower()]`,highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> list_models model_list = list_models() org = <span class="hljs-string">&quot;Helsinki-NLP&quot;</span> model_ids = [x.modelId <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> model_list <span class="hljs-keyword">if</span> x.modelId.startswith(org)] suffix = [x.split(<span class="hljs-string">&quot;/&quot;</span>)[<span class="hljs-number">1</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> model_ids] old_style_multi_models = [<span class="hljs-string">f&quot;<span class="hljs-subst">{org}</span>/<span class="hljs-subst">{s}</span>&quot;</span> <span class="hljs-keyword">for</span> s <span class="hljs-keyword">in</span> suffix <span class="hljs-keyword">if</span> s != s.lower()]`}}),so=new fe({}),ao=new Fe({props:{code:`['Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU', 
'Helsinki-NLP/opus-mt-ROMANCE-en', 'Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA', 'Helsinki-NLP/opus-mt-de-ZH', 'Helsinki-NLP/opus-mt-en-CELTIC', 'Helsinki-NLP/opus-mt-en-ROMANCE', 'Helsinki-NLP/opus-mt-es-NORWAY', 'Helsinki-NLP/opus-mt-fi-NORWAY', 'Helsinki-NLP/opus-mt-fi-ZH', 'Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI', 'Helsinki-NLP/opus-mt-sv-NORWAY', 'Helsinki-NLP/opus-mt-sv-ZH'] GROUP_MEMBERS = { 'ZH': ['cmn', 'cn', 'yue', 'ze_zh', 'zh_cn', 'zh_CN', 'zh_HK', 'zh_tw', 'zh_TW', 'zh_yue', 'zhs', 'zht', 'zh'], 'ROMANCE': ['fr', 'fr_BE', 'fr_CA', 'fr_FR', 'wa', 'frp', 'oc', 'ca', 'rm', 'lld', 'fur', 'lij', 'lmo', 'es', 'es_AR', 'es_CL', 'es_CO', 'es_CR', 'es_DO', 'es_EC', 'es_ES', 'es_GT', 'es_HN', 'es_MX', 'es_NI', 'es_PA', 'es_PE', 'es_PR', 'es_SV', 'es_UY', 'es_VE', 'pt', 'pt_br', 'pt_BR', 'pt_PT', 'gl', 'lad', 'an', 'mwl', 'it', 'it_IT', 'co', 'nap', 'scn', 'vec', 'sc', 'ro', 'la'], 'NORTH_EU': ['de', 'nl', 'fy', 'af', 'da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SCANDINAVIA': ['da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SAMI': ['se', 'sma', 'smj', 'smn', 'sms'], 'NORWAY': ['nb_NO', 'nb', 'nn_NO', 'nn', 'nog', 'no_nb', 'no'], 'CELTIC': ['ga', 'cy', 'br', 'gd', 'kw', 'gv'] }`,highlighted:`[<span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-ROMANCE-en&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-de-ZH&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-CELTIC&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-en-ROMANCE&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-es-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi-ZH&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-sv-NORWAY&#x27;</span>, <span class="hljs-string">&#x27;Helsinki-NLP/opus-mt-sv-ZH&#x27;</span>] GROUP_MEMBERS = { <span class="hljs-string">&#x27;ZH&#x27;</span>: [<span class="hljs-string">&#x27;cmn&#x27;</span>, <span class="hljs-string">&#x27;cn&#x27;</span>, <span class="hljs-string">&#x27;yue&#x27;</span>, <span class="hljs-string">&#x27;ze_zh&#x27;</span>, <span class="hljs-string">&#x27;zh_cn&#x27;</span>, <span class="hljs-string">&#x27;zh_CN&#x27;</span>, <span class="hljs-string">&#x27;zh_HK&#x27;</span>, <span class="hljs-string">&#x27;zh_tw&#x27;</span>, <span class="hljs-string">&#x27;zh_TW&#x27;</span>, <span class="hljs-string">&#x27;zh_yue&#x27;</span>, <span class="hljs-string">&#x27;zhs&#x27;</span>, <span class="hljs-string">&#x27;zht&#x27;</span>, <span class="hljs-string">&#x27;zh&#x27;</span>], <span class="hljs-string">&#x27;ROMANCE&#x27;</span>: [<span class="hljs-string">&#x27;fr&#x27;</span>, <span class="hljs-string">&#x27;fr_BE&#x27;</span>, <span class="hljs-string">&#x27;fr_CA&#x27;</span>, <span class="hljs-string">&#x27;fr_FR&#x27;</span>, <span class="hljs-string">&#x27;wa&#x27;</span>, <span class="hljs-string">&#x27;frp&#x27;</span>, <span class="hljs-string">&#x27;oc&#x27;</span>, <span class="hljs-string">&#x27;ca&#x27;</span>, <span class="hljs-string">&#x27;rm&#x27;</span>, <span class="hljs-string">&#x27;lld&#x27;</span>, <span class="hljs-string">&#x27;fur&#x27;</span>, <span 
class="hljs-string">&#x27;lij&#x27;</span>, <span class="hljs-string">&#x27;lmo&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;es_AR&#x27;</span>, <span class="hljs-string">&#x27;es_CL&#x27;</span>, <span class="hljs-string">&#x27;es_CO&#x27;</span>, <span class="hljs-string">&#x27;es_CR&#x27;</span>, <span class="hljs-string">&#x27;es_DO&#x27;</span>, <span class="hljs-string">&#x27;es_EC&#x27;</span>, <span class="hljs-string">&#x27;es_ES&#x27;</span>, <span class="hljs-string">&#x27;es_GT&#x27;</span>, <span class="hljs-string">&#x27;es_HN&#x27;</span>, <span class="hljs-string">&#x27;es_MX&#x27;</span>, <span class="hljs-string">&#x27;es_NI&#x27;</span>, <span class="hljs-string">&#x27;es_PA&#x27;</span>, <span class="hljs-string">&#x27;es_PE&#x27;</span>, <span class="hljs-string">&#x27;es_PR&#x27;</span>, <span class="hljs-string">&#x27;es_SV&#x27;</span>, <span class="hljs-string">&#x27;es_UY&#x27;</span>, <span class="hljs-string">&#x27;es_VE&#x27;</span>, <span class="hljs-string">&#x27;pt&#x27;</span>, <span class="hljs-string">&#x27;pt_br&#x27;</span>, <span class="hljs-string">&#x27;pt_BR&#x27;</span>, <span class="hljs-string">&#x27;pt_PT&#x27;</span>, <span class="hljs-string">&#x27;gl&#x27;</span>, <span class="hljs-string">&#x27;lad&#x27;</span>, <span class="hljs-string">&#x27;an&#x27;</span>, <span class="hljs-string">&#x27;mwl&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&#x27;it_IT&#x27;</span>, <span class="hljs-string">&#x27;co&#x27;</span>, <span class="hljs-string">&#x27;nap&#x27;</span>, <span class="hljs-string">&#x27;scn&#x27;</span>, <span class="hljs-string">&#x27;vec&#x27;</span>, <span class="hljs-string">&#x27;sc&#x27;</span>, <span class="hljs-string">&#x27;ro&#x27;</span>, <span class="hljs-string">&#x27;la&#x27;</span>], <span class="hljs-string">&#x27;NORTH_EU&#x27;</span>: [<span class="hljs-string">&#x27;de&#x27;</span>, <span class="hljs-string">&#x27;nl&#x27;</span>, <span class="hljs-string">&#x27;fy&#x27;</span>, <span class="hljs-string">&#x27;af&#x27;</span>, <span class="hljs-string">&#x27;da&#x27;</span>, <span class="hljs-string">&#x27;fo&#x27;</span>, <span class="hljs-string">&#x27;is&#x27;</span>, <span class="hljs-string">&#x27;no&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span class="hljs-string">&#x27;sv&#x27;</span>], <span class="hljs-string">&#x27;SCANDINAVIA&#x27;</span>: [<span class="hljs-string">&#x27;da&#x27;</span>, <span class="hljs-string">&#x27;fo&#x27;</span>, <span class="hljs-string">&#x27;is&#x27;</span>, <span class="hljs-string">&#x27;no&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span class="hljs-string">&#x27;sv&#x27;</span>], <span class="hljs-string">&#x27;SAMI&#x27;</span>: [<span class="hljs-string">&#x27;se&#x27;</span>, <span class="hljs-string">&#x27;sma&#x27;</span>, <span class="hljs-string">&#x27;smj&#x27;</span>, <span class="hljs-string">&#x27;smn&#x27;</span>, <span class="hljs-string">&#x27;sms&#x27;</span>], <span class="hljs-string">&#x27;NORWAY&#x27;</span>: [<span class="hljs-string">&#x27;nb_NO&#x27;</span>, <span class="hljs-string">&#x27;nb&#x27;</span>, <span class="hljs-string">&#x27;nn_NO&#x27;</span>, <span class="hljs-string">&#x27;nn&#x27;</span>, <span class="hljs-string">&#x27;nog&#x27;</span>, <span class="hljs-string">&#x27;no_nb&#x27;</span>, <span 
class="hljs-string">&#x27;no&#x27;</span>], <span class="hljs-string">&#x27;CELTIC&#x27;</span>: [<span class="hljs-string">&#x27;ga&#x27;</span>, <span class="hljs-string">&#x27;cy&#x27;</span>, <span class="hljs-string">&#x27;br&#x27;</span>, <span class="hljs-string">&#x27;gd&#x27;</span>, <span class="hljs-string">&#x27;kw&#x27;</span>, <span class="hljs-string">&#x27;gv&#x27;</span>] }`}}),ro=new Fe({props:{code:`from transformers import MarianMTModel, MarianTokenizer src_text = [ ">>fr<< this is a sentence in english that we want to translate to french", ">>pt<< This should go to portuguese", ">>es<< And this to Spanish", ] model_name = "Helsinki-NLP/opus-mt-en-ROMANCE" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> MarianMTModel, MarianTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>src_text = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;fr&lt;&lt; this is a sentence in english that we want to translate to french&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;pt&lt;&lt; This should go to portuguese&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&gt;&gt;es&lt;&lt; And this to Spanish&quot;</span>, <span class="hljs-meta">... </span>] <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-ROMANCE&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = MarianTokenizer.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>model = MarianMTModel.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>translated = model.generate(**tokenizer(src_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tgt_text = [tokenizer.decode(t, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> t <span class="hljs-keyword">in</span> translated] [<span class="hljs-string">&quot;c&#x27;est une phrase en anglais que nous voulons traduire en fran\xE7ais&quot;</span>, <span class="hljs-string">&#x27;Isto deve ir para o portugu\xEAs.&#x27;</span>, <span class="hljs-string">&#x27;Y esto al espa\xF1ol&#x27;</span>]`}}),io=new fe({}),lo=new Z({props:{name:"class transformers.MarianConfig",anchor:"transformers.MarianConfig",parameters:[{name:"vocab_size",val:" = 50265"},{name:"decoder_vocab_size",val:" = None"},{name:"max_position_embeddings",val:" = 1024"},{name:"encoder_layers",val:" = 12"},{name:"encoder_ffn_dim",val:" = 4096"},{name:"encoder_attention_heads",val:" = 16"},{name:"decoder_layers",val:" = 12"},{name:"decoder_ffn_dim",val:" = 4096"},{name:"decoder_attention_heads",val:" = 16"},{name:"encoder_layerdrop",val:" = 0.0"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"is_encoder_decoder",val:" = True"},{name:"activation_function",val:" = 'gelu'"},{name:"d_model",val:" = 1024"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 
0.02"},{name:"decoder_start_token_id",val:" = 58100"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = False"},{name:"pad_token_id",val:" = 58100"},{name:"eos_token_id",val:" = 0"},{name:"forced_eos_token_id",val:" = 0"},{name:"share_encoder_decoder_embeddings",val:" = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.MarianConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianModel">MarianModel</a> or <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.TFMarianModel">TFMarianModel</a>.`,name:"vocab_size"},{anchor:"transformers.MarianConfig.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.MarianConfig.encoder_layers",description:`<strong>encoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of encoder layers.`,name:"encoder_layers"},{anchor:"transformers.MarianConfig.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.MarianConfig.encoder_attention_heads",description:`<strong>encoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"encoder_attention_heads"},{anchor:"transformers.MarianConfig.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.MarianConfig.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.MarianConfig.encoder_ffn_dim",description:`<strong>encoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"encoder_ffn_dim"},{anchor:"transformers.MarianConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.MarianConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.MarianConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.MarianConfig.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.MarianConfig.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.MarianConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.MarianConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"init_std"},{anchor:"transformers.MarianConfig.encoder_layerdrop",description:`<strong>encoder_layerdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The LayerDrop probability for the encoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"encoder_layerdrop"},{anchor:"transformers.MarianConfig.decoder_layerdrop",description:`<strong>decoder_layerdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"decoder_layerdrop"},{anchor:"transformers.MarianConfig.scale_embedding",description:`<strong>scale_embedding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Scale embeddings by diving by sqrt(d_model).`,name:"scale_embedding"},{anchor:"transformers.MarianConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models)`,name:"use_cache"},{anchor:"transformers.MarianConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
Usually set to <code>eos_token_id</code>.`,name:"forced_eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/configuration_marian.py#L34"}}),en=new Rt({props:{anchor:"transformers.MarianConfig.example",$$slots:{default:[Mm]},$$scope:{ctx:E}}}),po=new fe({}),ho=new Z({props:{name:"class transformers.MarianTokenizer",anchor:"transformers.MarianTokenizer",parameters:[{name:"source_spm",val:""},{name:"target_spm",val:""},{name:"vocab",val:""},{name:"target_vocab_file",val:" = None"},{name:"source_lang",val:" = None"},{name:"target_lang",val:" = None"},{name:"unk_token",val:" = '<unk>'"},{name:"eos_token",val:" = '</s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"model_max_length",val:" = 512"},{name:"sp_model_kwargs",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"separate_vocabs",val:" = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.MarianTokenizer.source_spm",description:`<strong>source_spm</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary for the source language.`,name:"source_spm"},{anchor:"transformers.MarianTokenizer.target_spm",description:`<strong>target_spm</strong> (<code>str</code>) &#x2014; <a href="https://github.com/google/sentencepiece" rel="nofollow">SentencePiece</a> file (generally has a .spm extension) that contains the vocabulary for the target language.`,name:"target_spm"},{anchor:"transformers.MarianTokenizer.source_lang",description:`<strong>source_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the source language.`,name:"source_lang"},{anchor:"transformers.MarianTokenizer.target_lang",description:`<strong>target_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; A string representing the target language.`,name:"target_lang"},{anchor:"transformers.MarianTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.MarianTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.`,name:"eos_token"},{anchor:"transformers.MarianTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.MarianTokenizer.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sentence length the model accepts.`,name:"model_max_length"},{anchor:"transformers.MarianTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (<code>List[str]</code>, <em>optional</em>, defaults to <code>[&quot;&lt;eop&gt;&quot;, &quot;&lt;eod&gt;&quot;]</code>) &#x2014; Additional special tokens used by the tokenizer.`,name:"additional_special_tokens"},{anchor:"transformers.MarianTokenizer.sp_model_kwargs",description:`<strong>sp_model_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Will be passed to the <code>SentencePieceProcessor.__init__()</code> method. The <a href="https://github.com/google/sentencepiece/tree/master/python" rel="nofollow">Python wrapper for SentencePiece</a> can be used, among other things, to set:</p> <ul> <li> <p><code>enable_sampling</code>: Enable subword regularization.</p> </li> <li> <p><code>nbest_size</code>: Sampling parameters for unigram. Invalid for BPE-Dropout.</p> <ul> <li><code>nbest_size = {0,1}</code>: No sampling is performed.</li> <li><code>nbest_size &gt; 1</code>: samples from the nbest_size results.</li> <li><code>nbest_size &lt; 0</code>: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm.</li> </ul> </li> <li> <p><code>alpha</code>: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.</p> </li> </ul>`,name:"sp_model_kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/tokenization_marian.py#L61"}}),nn=new Rt({props:{anchor:"transformers.MarianTokenizer.example",$$slots:{default:[bm]},$$scope:{ctx:E}}}),_o=new Z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.MarianTokenizer.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:""},{name:"token_ids_1",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/tokenization_marian.py#L273"}}),go=new fe({}),vo=new Z({props:{name:"class transformers.MarianModel",anchor:"transformers.MarianModel",parameters:[{name:"config",val:": MarianConfig"}],parametersDescription:[{anchor:"transformers.MarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1089"}}),bo=new Z({props:{name:"forward",anchor:"transformers.MarianModel.forward",parameters:[{name:"input_ids",val:": LongTensor = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decoder_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"decoder_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decoder_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"cross_attn_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple[torch.Tensor], transformers.modeling_outputs.BaseModelOutput, NoneType] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.MarianModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.MarianModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MarianModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MarianModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MarianModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MarianModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MarianModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MarianModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1171",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute 
the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),an=new Dn({props:{$$slots:{default:[ym]},$$scope:{ctx:E}}}),rn=new Rt({props:{anchor:"transformers.MarianModel.forward.example",$$slots:{default:[xm]},$$scope:{ctx:E}}}),yo=new fe({}),xo=new Z({props:{name:"class transformers.MarianMTModel",anchor:"transformers.MarianMTModel",parameters:[{name:"config",val:": MarianConfig"}],parametersDescription:[{anchor:"transformers.MarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1273"}}),jo=new Z({props:{name:"forward",anchor:"transformers.MarianMTModel.forward",parameters:[{name:"input_ids",val:": LongTensor = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decoder_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"decoder_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decoder_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"cross_attn_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple[torch.Tensor], transformers.modeling_outputs.BaseModelOutput, NoneType] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.MarianMTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianMTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianMTModel.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.MarianMTModel.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.MarianMTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianMTModel.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.MarianMTModel.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianMTModel.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.MarianMTModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianMTModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.MarianMTModel.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.MarianMTModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.MarianMTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianMTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianMTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.MarianMTModel.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1398",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, 
sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ln=new Dn({props:{$$slots:{default:[wm]},$$scope:{ctx:E}}}),cn=new Rt({props:{anchor:"transformers.MarianMTModel.forward.example",$$slots:{default:[$m]},$$scope:{ctx:E}}}),Po=new fe({}),Fo=new Z({props:{name:"class transformers.MarianForCausalLM",anchor:"transformers.MarianForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1542"}}),No=new Z({props:{name:"forward",anchor:"transformers.MarianForCausalLM.forward",parameters:[{name:"input_ids",val:": LongTensor = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_attention_mask",val:": 
typing.Optional[torch.FloatTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"cross_attn_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.MarianForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MarianForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.MarianForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.MarianForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.MarianForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.MarianForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.MarianForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.MarianForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.MarianForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.MarianForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.MarianForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.MarianForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_marian.py#L1573",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, 
returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),un=new Rt({props:{anchor:"transformers.MarianForCausalLM.forward.example",$$slots:{default:[zm]},$$scope:{ctx:E}}}),Co=new fe({}),Lo=new Z({props:{name:"class transformers.TFMarianModel",anchor:"transformers.TFMarianModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFMarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_tf_marian.py#L1197"}}),fn=new Dn({props:{$$slots:{default:[jm]},$$scope:{ctx:E}}}),So=new Z({props:{name:"call",anchor:"transformers.TFMarianModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"decoder_position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Union[typing.Tuple, transformers.modeling_tf_outputs.TFBaseModelOutput, NoneType] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"training",val:" = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFMarianModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMarianModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMarianModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFMarianModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMarianModel.call.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.TFMarianModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMarianModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMarianModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMarianModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFMarianModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMarianModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFMarianModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMarianModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMarianModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMarianModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_tf_marian.py#L1209",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" >transformers.modeling_tf_outputs.TFSeq2SeqModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),_n=new Dn({props:{$$slots:{default:[Em]},$$scope:{ctx:E}}}),gn=new Rt({props:{anchor:"transformers.TFMarianModel.call.example",$$slots:{default:[qm]},$$scope:{ctx:E}}}),Do=new fe({}),Ho=new Z({props:{name:"class transformers.TFMarianMTModel",anchor:"transformers.TFMarianMTModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFMarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_tf_marian.py#L1303"}}),kn=new Dn({props:{$$slots:{default:[Pm]},$$scope:{ctx:E}}}),Ro=new Z({props:{name:"call",anchor:"transformers.TFMarianMTModel.call",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_input_ids",val:" = None"},{name:"decoder_attention_mask",val:" = None"},{name:"decoder_position_ids",val:" = None"},{name:"head_mask",val:" = None"},{name:"decoder_head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"encoder_outputs",val:": typing.Optional[transformers.modeling_tf_outputs.TFBaseModelOutput] = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"decoder_inputs_embeds",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"},{name:"labels",val:" = None"},{name:"training",val:" = False"}],parametersDescription:[{anchor:"transformers.TFMarianMTModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFMarianMTModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFMarianMTModel.call.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>Marian uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).`,name:"decoder_input_ids"},{anchor:"transformers.TFMarianMTModel.call.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.`,name:"decoder_attention_mask"},{anchor:"transformers.TFMarianMTModel.call.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.TFMarianMTModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFMarianMTModel.call.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.TFMarianMTModel.call.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>tf.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.TFMarianMTModel.call.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tf.FloatTensor</code>, <em>optional</em>) &#x2014; hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of`,name:"encoder_outputs"},{anchor:"transformers.TFMarianMTModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFMarianMTModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFMarianMTModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFMarianMTModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFMarianMTModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFMarianMTModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFMarianMTModel.call.labels",description:`<strong>labels</strong> (<code>tf.tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_tf_marian.py#L1337",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 
Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" >transformers.modeling_tf_outputs.TFSeq2SeqLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Tn=new Dn({props:{$$slots:{default:[Fm]},$$scope:{ctx:E}}}),Mn=new Rt({props:{anchor:"transformers.TFMarianMTModel.call.example",$$slots:{default:[Nm]},$$scope:{ctx:E}}}),Go=new fe({}),Jo=new Z({props:{name:"class transformers.FlaxMarianModel",anchor:"transformers.FlaxMarianModel",parameters:[{name:"config",val:": MarianConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxMarianModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMarianModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_flax_marian.py#L1207"}}),os=new Z({props:{name:"__call__",anchor:"transformers.FlaxMarianModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxMarianModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMarianModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMarianModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMarianModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMarianModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMarianModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMarianModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMarianModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMarianModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_flax_marian.py#L1142",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, 
sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),yn=new Dn({props:{$$slots:{default:[Cm]},$$scope:{ctx:E}}}),xn=new Rt({props:{anchor:"transformers.FlaxMarianModel.__call__.example",$$slots:{default:[Lm]},$$scope:{ctx:E}}}),ss=new fe({}),as=new Z({props:{name:"class transformers.FlaxMarianMTModel",anchor:"transformers.FlaxMarianMTModel",parameters:[{name:"config",val:": MarianConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxMarianMTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig">MarianConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxMarianMTModel.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_flax_marian.py#L1293"}}),us=new Z({props:{name:"__call__",anchor:"transformers.FlaxMarianMTModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_input_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxMarianMTModel.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxMarianMTModel.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxMarianMTModel.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianTokenizer">MarianTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For translation and summarization training, <code>decoder_input_ids</code> should be provided. If no <code>decoder_input_ids</code> is provided, the model will create this tensor by shifting the <code>input_ids</code> to the right for denoising pre-training following the paper.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxMarianMTModel.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxMarianMTModel.__call__.position_ids",description:`<strong>position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"position_ids"},{anchor:"transformers.FlaxMarianMTModel.__call__.decoder_position_ids",description:`<strong>decoder_position_ids</strong> (<code>numpy.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.`,name:"decoder_position_ids"},{anchor:"transformers.FlaxMarianMTModel.__call__.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxMarianMTModel.__call__.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxMarianMTModel.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/marian/modeling_flax_marian.py#L1142",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianConfig" >MarianConfig</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the 
weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),$n=new Dn({props:{$$slots:{default:[Im]},$$scope:{ctx:E}}}),zn=new Rt({props:{anchor:"transformers.FlaxMarianMTModel.__call__.example",$$slots:{default:[Am]},$$scope:{ctx:E}}}),{c(){h=o("meta"),k=l(),g=o("h1"),_=o("a"),v=o("span"),M(d.$$.fragment),u=l(),q=o("span"),Ve=r("MarianMT"),Ne=l(),C=o("p"),ve=o("strong"),ke=r("Bugs:"),z=r(" If you see something strange, file a "),Q=o("a"),ee=r("Github Issue"),Ke=r(` and assign @patrickvonplaten.`),Ce=l(),R=o("p"),Ge=r("Translations should be similar, but not identical to output in the test set linked to in each model card."),Le=l(),L=o("h2"),X=o("a"),Te=o("span"),M(H.$$.fragment),Je=l(),Me=o("span"),te=r("Implementation Notes"),Ie=l(),F=o("ul"),ne=o("li"),N=o("p"),I=r("Each model is about 298 MB on disk, there are more than 1,000 models."),Ye=l(),V=o("li"),oe=o("p"),Ze=r("The list of supported language pairs can be found "),U=o("a"),Xe=r("here"),Qe=r("."),A=l(),be=o("li"),O=o("p"),et=r("Models were originally trained by "),se=o("a"),ae=r("J\xF6rg Tiedemann"),tt=r(" using the "),re=o("a"),K=r("Marian"),nt=r(" C++ library, which supports fast training and translation."),ie=l(),ye=o("li"),de=o("p"),S=r(`All models are transformer encoder-decoders with 6 layers in each component. 
Each model\u2019s performance is documented in a model card.`),ot=l(),D=o("li"),xe=o("p"),st=r("The 80 opus models that require BPE preprocessing are not supported."),T=l(),j=o("li"),we=o("p"),$e=r("The modeling code is the same as "),Ae=o("a"),ft=r("BartForConditionalGeneration"),P=r(" with a few minor modifications:"),_t=l(),le=o("ul"),ze=o("li"),gt=r("static (sinusoid) positional embeddings ("),at=o("code"),G=r("MarianConfig.static_position_embeddings=True"),ce=r(")"),vt=l(),je=o("li"),pe=r("no layernorm_embedding ("),rt=o("code"),kt=r("MarianConfig.normalize_embedding=False"),it=r(")"),Tt=l(),B=o("li"),Mt=r("the model starts generating with "),Ks=o("code"),Li=r("pad_token_id"),Ii=r(` (which has 0 as a token_embedding) as the prefix (Bart uses `),Gs=o("code"),Ai=r("<s/>"),Oi=r("),"),Si=l(),Js=o("li"),Hn=o("p"),Di=r("Code to bulk convert models can be found in "),Ys=o("code"),Hi=r("convert_marian_to_pytorch.py"),Ui=r("."),Bi=l(),Zs=o("li"),Un=o("p"),Wi=r("This model was contributed by "),Bn=o("a"),Ri=r("sshleifer"),Vi=r("."),cr=l(),bt=o("h2"),Kt=o("a"),Xs=o("span"),M(Wn.$$.fragment),Ki=l(),Qs=o("span"),Gi=r("Naming"),pr=l(),Oe=o("ul"),gs=o("li"),Ji=r("All model names use the following format: "),ea=o("code"),Yi=r("Helsinki-NLP/opus-mt-{src}-{tgt}"),Zi=l(),Rn=o("li"),Xi=r("The language codes used to name models are inconsistent. Two digit codes can usually be found "),Vn=o("a"),Qi=r("here"),ed=r(`, three digit codes require googling \u201Clanguage code {code}\u201C.`),td=l(),yt=o("li"),nd=r("Codes formatted like "),ta=o("code"),od=r("es_AR"),sd=r(" are usually "),na=o("code"),ad=r("code_{region}"),rd=r(". That one is Spanish from Argentina."),id=l(),oa=o("li"),dd=r(`The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second group use a combination of ISO-639-5 codes and ISO-639-2 codes.`),hr=l(),xt=o("h2"),Gt=o("a"),sa=o("span"),M(Kn.$$.fragment),ld=l(),aa=o("span"),cd=r("Examples"),ur=l(),Jt=o("ul"),ra=o("li"),pd=r(`Since Marian models are smaller than many other translation models available in the library, they can be useful for fine-tuning experiments and integration tests.`),hd=l(),ia=o("li"),Gn=o("a"),ud=r("Fine-tune on GPU"),mr=l(),wt=o("h2"),Yt=o("a"),da=o("span"),M(Jn.$$.fragment),md=l(),la=o("span"),fd=r("Multilingual Models"),fr=l(),Se=o("ul"),Yn=o("li"),_d=r("All model names use the following format: "),ca=o("code"),gd=r("Helsinki-NLP/opus-mt-{src}-{tgt}"),vd=r(":"),kd=l(),Zn=o("li"),Td=r(`If a model can output multiple languages, and you should specify a language code by prepending the desired output language to the `),pa=o("code"),Md=r("src_text"),bd=r("."),yd=l(),Xn=o("li"),xd=r("You can see a models\u2019s supported language codes in its model card, under target constituents, like in "),Qn=o("a"),wd=r("opus-mt-en-roa"),$d=r("."),zd=l(),eo=o("li"),jd=r("Note that if a model is only multilingual on the source side, like "),ha=o("code"),Ed=r("Helsinki-NLP/opus-mt-roa-en"),qd=r(`, no language codes are required.`),_r=l(),Zt=o("p"),Pd=r("New multi-lingual models from the "),to=o("a"),Fd=r("Tatoeba-Challenge repo"),Nd=r(` require 3 character language codes:`),gr=l(),M(no.$$.fragment),vr=l(),vs=o("p"),Cd=r("Here is the code to see all available pretrained models on the hub:"),kr=l(),M(oo.$$.fragment),Tr=l(),$t=o("h2"),Xt=o("a"),ua=o("span"),M(so.$$.fragment),Ld=l(),ma=o("span"),Id=r("Old Style Multi-Lingual Models"),Mr=l(),ks=o("p"),Ad=r(`These are the old style multi-lingual models ported from the OPUS-MT-Train repo: and 
the members of each language group:`),br=l(),M(ao.$$.fragment),yr=l(),Ts=o("p"),Od=r("Example of translating english to many romance languages, using old-style 2 character language codes"),xr=l(),M(ro.$$.fragment),wr=l(),zt=o("h2"),Qt=o("a"),fa=o("span"),M(io.$$.fragment),Sd=l(),_a=o("span"),Dd=r("MarianConfig"),$r=l(),Ee=o("div"),M(lo.$$.fragment),Hd=l(),jt=o("p"),Ud=r("This is the configuration class to store the configuration of a "),Ms=o("a"),Bd=r("MarianModel"),Wd=r(`. It is used to instantiate an Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Marian `),co=o("a"),Rd=r("Helsinki-NLP/opus-mt-en-de"),Vd=r(" architecture."),Kd=l(),Et=o("p"),Gd=r("Configuration objects inherit from "),bs=o("a"),Jd=r("PretrainedConfig"),Yd=r(` and can be used to control the model outputs. Read the documentation from `),ys=o("a"),Zd=r("PretrainedConfig"),Xd=r(" for more information."),Qd=l(),M(en.$$.fragment),zr=l(),qt=o("h2"),tn=o("a"),ga=o("span"),M(po.$$.fragment),el=l(),va=o("span"),tl=r("MarianTokenizer"),jr=l(),he=o("div"),M(ho.$$.fragment),nl=l(),uo=o("p"),ol=r("Construct a Marian tokenizer. Based on "),mo=o("a"),sl=r("SentencePiece"),al=r("."),rl=l(),fo=o("p"),il=r("This tokenizer inherits from "),xs=o("a"),dl=r("PreTrainedTokenizer"),ll=r(` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),cl=l(),M(nn.$$.fragment),pl=l(),on=o("div"),M(_o.$$.fragment),hl=l(),ka=o("p"),ul=r("Build model inputs from a sequence by appending eos_token_id."),Er=l(),Pt=o("h2"),sn=o("a"),Ta=o("span"),M(go.$$.fragment),ml=l(),Ma=o("span"),fl=r("MarianModel"),qr=l(),qe=o("div"),M(vo.$$.fragment),_l=l(),ko=o("p"),gl=r(`The bare Marian Model outputting raw hidden-states without any specific head on top. This model inherits from `),ws=o("a"),vl=r("PreTrainedModel"),kl=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Tl=l(),To=o("p"),Ml=r("This model is also a PyTorch "),Mo=o("a"),bl=r("torch.nn.Module"),yl=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xl=l(),De=o("div"),M(bo.$$.fragment),wl=l(),Ft=o("p"),$l=r("The "),$s=o("a"),zl=r("MarianModel"),jl=r(" forward method, overrides the "),ba=o("code"),El=r("__call__"),ql=r(" special method."),Pl=l(),M(an.$$.fragment),Fl=l(),M(rn.$$.fragment),Pr=l(),Nt=o("h2"),dn=o("a"),ya=o("span"),M(yo.$$.fragment),Nl=l(),xa=o("span"),Cl=r("MarianMTModel"),Fr=l(),Pe=o("div"),M(xo.$$.fragment),Ll=l(),wo=o("p"),Il=r(`The Marian Model with a language modeling head. Can be used for summarization. This model inherits from `),zs=o("a"),Al=r("PreTrainedModel"),Ol=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Sl=l(),$o=o("p"),Dl=r("This model is also a PyTorch "),zo=o("a"),Hl=r("torch.nn.Module"),Ul=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Bl=l(),_e=o("div"),M(jo.$$.fragment),Wl=l(),Ct=o("p"),Rl=r("The "),js=o("a"),Vl=r("MarianMTModel"),Kl=r(" forward method, overrides the "),wa=o("code"),Gl=r("__call__"),Jl=r(" special method."),Yl=l(),M(ln.$$.fragment),Zl=l(),Eo=o("p"),Xl=r(`Pytorch version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),qo=o("a"),Ql=r("here"),ec=r("."),tc=l(),M(cn.$$.fragment),Nr=l(),Lt=o("h2"),pn=o("a"),$a=o("span"),M(Po.$$.fragment),nc=l(),za=o("span"),oc=r("MarianForCausalLM"),Cr=l(),It=o("div"),M(Fo.$$.fragment),sc=l(),hn=o("div"),M(No.$$.fragment),ac=l(),M(un.$$.fragment),Lr=l(),At=o("h2"),mn=o("a"),ja=o("span"),M(Co.$$.fragment),rc=l(),Ea=o("span"),ic=r("TFMarianModel"),Ir=l(),ue=o("div"),M(Lo.$$.fragment),dc=l(),Io=o("p"),lc=r(`The bare MARIAN Model outputting raw hidden-states without any specific head on top. This model inherits from `),Es=o("a"),cc=r("TFPreTrainedModel"),pc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),hc=l(),Ao=o("p"),uc=r("This model is also a "),Oo=o("a"),mc=r("tf.keras.Model"),fc=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),_c=l(),M(fn.$$.fragment),gc=l(),He=o("div"),M(So.$$.fragment),vc=l(),Ot=o("p"),kc=r("The "),qs=o("a"),Tc=r("TFMarianModel"),Mc=r(" forward method, overrides the "),qa=o("code"),bc=r("__call__"),yc=r(" special method."),xc=l(),M(_n.$$.fragment),wc=l(),M(gn.$$.fragment),Ar=l(),St=o("h2"),vn=o("a"),Pa=o("span"),M(Do.$$.fragment),$c=l(),Fa=o("span"),zc=r("TFMarianMTModel"),Or=l(),me=o("div"),M(Ho.$$.fragment),jc=l(),Uo=o("p"),Ec=r(`The MARIAN Model with a language modeling head. Can be used for summarization. This model inherits from `),Ps=o("a"),qc=r("TFPreTrainedModel"),Pc=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Fc=l(),Bo=o("p"),Nc=r("This model is also a "),Wo=o("a"),Cc=r("tf.keras.Model"),Lc=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ic=l(),M(kn.$$.fragment),Ac=l(),ge=o("div"),M(Ro.$$.fragment),Oc=l(),Dt=o("p"),Sc=r("The "),Fs=o("a"),Dc=r("TFMarianMTModel"),Hc=r(" forward method, overrides the "),Na=o("code"),Uc=r("__call__"),Bc=r(" special method."),Wc=l(),M(Tn.$$.fragment),Rc=l(),Vo=o("p"),Vc=r(`TF version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),Ko=o("a"),Kc=r("here"),Gc=r("."),Jc=l(),M(Mn.$$.fragment),Sr=l(),Ht=o("h2"),bn=o("a"),Ca=o("span"),M(Go.$$.fragment),Yc=l(),La=o("span"),Zc=r("FlaxMarianModel"),Dr=l(),J=o("div"),M(Jo.$$.fragment),Xc=l(),Yo=o("p"),Qc=r(`The bare Marian Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Ns=o("a"),ep=r("FlaxPreTrainedModel"),tp=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),np=l(),Zo=o("p"),op=r(`This model is also a Flax Linen `),Xo=o("a"),sp=r("flax.nn.Module"),ap=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),rp=l(),Ia=o("p"),ip=r("Finally, this model supports inherent JAX features such as:"),dp=l(),dt=o("ul"),Aa=o("li"),Qo=o("a"),lp=r("Just-In-Time (JIT) compilation"),cp=l(),Oa=o("li"),es=o("a"),pp=r("Automatic Differentiation"),hp=l(),Sa=o("li"),ts=o("a"),up=r("Vectorization"),mp=l(),Da=o("li"),ns=o("a"),fp=r("Parallelization"),_p=l(),Ue=o("div"),M(os.$$.fragment),gp=l(),Ut=o("p"),vp=r("The "),Ha=o("code"),kp=r("FlaxMarianPreTrainedModel"),Tp=r(" forward method, overrides the "),Ua=o("code"),Mp=r("__call__"),bp=r(" special method."),yp=l(),M(yn.$$.fragment),xp=l(),M(xn.$$.fragment),Hr=l(),Bt=o("h2"),wn=o("a"),Ba=o("span"),M(ss.$$.fragment),wp=l(),Wa=o("span"),$p=r("FlaxMarianMTModel"),Ur=l(),Y=o("div"),M(as.$$.fragment),zp=l(),rs=o("p"),jp=r(`The MARIAN Model with a language modeling head. Can be used for translation. This model inherits from `),Cs=o("a"),Ep=r("FlaxPreTrainedModel"),qp=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Pp=l(),is=o("p"),Fp=r(`This model is also a Flax Linen `),ds=o("a"),Np=r("flax.nn.Module"),Cp=r(` subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Lp=l(),Ra=o("p"),Ip=r("Finally, this model supports inherent JAX features such as:"),Ap=l(),lt=o("ul"),Va=o("li"),ls=o("a"),Op=r("Just-In-Time (JIT) compilation"),Sp=l(),Ka=o("li"),cs=o("a"),Dp=r("Automatic Differentiation"),Hp=l(),Ga=o("li"),ps=o("a"),Up=r("Vectorization"),Bp=l(),Ja=o("li"),hs=o("a"),Wp=r("Parallelization"),Rp=l(),Be=o("div"),M(us.$$.fragment),Vp=l(),Wt=o("p"),Kp=r("The "),Ya=o("code"),Gp=r("FlaxMarianPreTrainedModel"),Jp=r(" forward method, overrides the "),Za=o("code"),Yp=r("__call__"),Zp=r(" special method."),Xp=l(),M($n.$$.fragment),Qp=l(),M(zn.$$.fragment),this.h()},l(n){const m=km('[data-svelte="svelte-1phssyn"]',document.head);h=s(m,"META",{name:!0,content:!0}),m.forEach(t),k=c(n),g=s(n,"H1",{class:!0});var ms=a(g);_=s(ms,"A",{id:!0,class:!0,href:!0});var Xa=a(_);v=s(Xa,"SPAN",{});var Qa=a(v);b(d.$$.fragment,Qa),Qa.forEach(t),Xa.forEach(t),u=c(ms),q=s(ms,"SPAN",{});var er=a(q);Ve=i(er,"MarianMT"),er.forEach(t),ms.forEach(t),Ne=c(n),C=s(n,"P",{});var jn=a(C);ve=s(jn,"STRONG",{});var tr=a(ve);ke=i(tr,"Bugs:"),tr.forEach(t),z=i(jn," If you see something strange, file a "),Q=s(jn,"A",{href:!0,rel:!0});var nr=a(Q);ee=i(nr,"Github Issue"),nr.forEach(t),Ke=i(jn,` and assign @patrickvonplaten.`),jn.forEach(t),Ce=c(n),R=s(n,"P",{});var or=a(R);Ge=i(or,"Translations should be similar, but not identical to output in the test set linked to in each model card."),or.forEach(t),Le=c(n),L=s(n,"H2",{class:!0});var fs=a(L);X=s(fs,"A",{id:!0,class:!0,href:!0});var sr=a(X);Te=s(sr,"SPAN",{});var ar=a(Te);b(H.$$.fragment,ar),ar.forEach(t),sr.forEach(t),Je=c(fs),Me=s(fs,"SPAN",{});var rr=a(Me);te=i(rr,"Implementation Notes"),rr.forEach(t),fs.forEach(t),Ie=c(n),F=s(n,"UL",{});var W=a(F);ne=s(W,"LI",{});var ir=a(ne);N=s(ir,"P",{});var dr=a(N);I=i(dr,"Each model is about 298 MB on disk, 
there are more than 1,000 models."),dr.forEach(t),ir.forEach(t),Ye=c(W),V=s(W,"LI",{});var lr=a(V);oe=s(lr,"P",{});var _s=a(oe);Ze=i(_s,"The list of supported language pairs can be found "),U=s(_s,"A",{href:!0,rel:!0});var th=a(U);Xe=i(th,"here"),th.forEach(t),Qe=i(_s,"."),_s.forEach(t),lr.forEach(t),A=c(W),be=s(W,"LI",{});var nh=a(be);O=s(nh,"P",{});var Ls=a(O);et=i(Ls,"Models were originally trained by "),se=s(Ls,"A",{href:!0,rel:!0});var oh=a(se);ae=i(oh,"J\xF6rg Tiedemann"),oh.forEach(t),tt=i(Ls," using the "),re=s(Ls,"A",{href:!0,rel:!0});var sh=a(re);K=i(sh,"Marian"),sh.forEach(t),nt=i(Ls," C++ library, which supports fast training and translation."),Ls.forEach(t),nh.forEach(t),ie=c(W),ye=s(W,"LI",{});var ah=a(ye);de=s(ah,"P",{});var rh=a(de);S=i(rh,`All models are transformer encoder-decoders with 6 layers in each component. Each model\u2019s performance is documented in a model card.`),rh.forEach(t),ah.forEach(t),ot=c(W),D=s(W,"LI",{});var ih=a(D);xe=s(ih,"P",{});var dh=a(xe);st=i(dh,"The 80 opus models that require BPE preprocessing are not supported."),dh.forEach(t),ih.forEach(t),T=c(W),j=s(W,"LI",{});var Wr=a(j);we=s(Wr,"P",{});var Rr=a(we);$e=i(Rr,"The modeling code is the same as "),Ae=s(Rr,"A",{href:!0});var lh=a(Ae);ft=i(lh,"BartForConditionalGeneration"),lh.forEach(t),P=i(Rr," with a few minor modifications:"),Rr.forEach(t),_t=c(Wr),le=s(Wr,"UL",{});var Is=a(le);ze=s(Is,"LI",{});var Vr=a(ze);gt=i(Vr,"static (sinusoid) positional embeddings ("),at=s(Vr,"CODE",{});var ch=a(at);G=i(ch,"MarianConfig.static_position_embeddings=True"),ch.forEach(t),ce=i(Vr,")"),Vr.forEach(t),vt=c(Is),je=s(Is,"LI",{});var Kr=a(je);pe=i(Kr,"no layernorm_embedding ("),rt=s(Kr,"CODE",{});var ph=a(rt);kt=i(ph,"MarianConfig.normalize_embedding=False"),ph.forEach(t),it=i(Kr,")"),Kr.forEach(t),Tt=c(Is),B=s(Is,"LI",{});var As=a(B);Mt=i(As,"the model starts generating with "),Ks=s(As,"CODE",{});var hh=a(Ks);Li=i(hh,"pad_token_id"),hh.forEach(t),Ii=i(As,` (which has 0 as a token_embedding) as the prefix (Bart uses `),Gs=s(As,"CODE",{});var uh=a(Gs);Ai=i(uh,"<s/>"),uh.forEach(t),Oi=i(As,"),"),As.forEach(t),Is.forEach(t),Wr.forEach(t),Si=c(W),Js=s(W,"LI",{});var mh=a(Js);Hn=s(mh,"P",{});var Gr=a(Hn);Di=i(Gr,"Code to bulk convert models can be found in "),Ys=s(Gr,"CODE",{});var fh=a(Ys);Hi=i(fh,"convert_marian_to_pytorch.py"),fh.forEach(t),Ui=i(Gr,"."),Gr.forEach(t),mh.forEach(t),Bi=c(W),Zs=s(W,"LI",{});var _h=a(Zs);Un=s(_h,"P",{});var Jr=a(Un);Wi=i(Jr,"This model was contributed by "),Bn=s(Jr,"A",{href:!0,rel:!0});var gh=a(Bn);Ri=i(gh,"sshleifer"),gh.forEach(t),Vi=i(Jr,"."),Jr.forEach(t),_h.forEach(t),W.forEach(t),cr=c(n),bt=s(n,"H2",{class:!0});var Yr=a(bt);Kt=s(Yr,"A",{id:!0,class:!0,href:!0});var vh=a(Kt);Xs=s(vh,"SPAN",{});var kh=a(Xs);b(Wn.$$.fragment,kh),kh.forEach(t),vh.forEach(t),Ki=c(Yr),Qs=s(Yr,"SPAN",{});var Th=a(Qs);Gi=i(Th,"Naming"),Th.forEach(t),Yr.forEach(t),pr=c(n),Oe=s(n,"UL",{});var En=a(Oe);gs=s(En,"LI",{});var eh=a(gs);Ji=i(eh,"All model names use the following format: "),ea=s(eh,"CODE",{});var Mh=a(ea);Yi=i(Mh,"Helsinki-NLP/opus-mt-{src}-{tgt}"),Mh.forEach(t),eh.forEach(t),Zi=c(En),Rn=s(En,"LI",{});var Zr=a(Rn);Xi=i(Zr,"The language codes used to name models are inconsistent. 
Two digit codes can usually be found "),Vn=s(Zr,"A",{href:!0,rel:!0});var bh=a(Vn);Qi=i(bh,"here"),bh.forEach(t),ed=i(Zr,`, three digit codes require googling \u201Clanguage code {code}\u201C.`),Zr.forEach(t),td=c(En),yt=s(En,"LI",{});var Os=a(yt);nd=i(Os,"Codes formatted like "),ta=s(Os,"CODE",{});var yh=a(ta);od=i(yh,"es_AR"),yh.forEach(t),sd=i(Os," are usually "),na=s(Os,"CODE",{});var xh=a(na);ad=i(xh,"code_{region}"),xh.forEach(t),rd=i(Os,". That one is Spanish from Argentina."),Os.forEach(t),id=c(En),oa=s(En,"LI",{});var wh=a(oa);dd=i(wh,`The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second group use a combination of ISO-639-5 codes and ISO-639-2 codes.`),wh.forEach(t),En.forEach(t),hr=c(n),xt=s(n,"H2",{class:!0});var Xr=a(xt);Gt=s(Xr,"A",{id:!0,class:!0,href:!0});var $h=a(Gt);sa=s($h,"SPAN",{});var zh=a(sa);b(Kn.$$.fragment,zh),zh.forEach(t),$h.forEach(t),ld=c(Xr),aa=s(Xr,"SPAN",{});var jh=a(aa);cd=i(jh,"Examples"),jh.forEach(t),Xr.forEach(t),ur=c(n),Jt=s(n,"UL",{});var Qr=a(Jt);ra=s(Qr,"LI",{});var Eh=a(ra);pd=i(Eh,`Since Marian models are smaller than many other translation models available in the library, they can be useful for fine-tuning experiments and integration tests.`),Eh.forEach(t),hd=c(Qr),ia=s(Qr,"LI",{});var qh=a(ia);Gn=s(qh,"A",{href:!0,rel:!0});var Ph=a(Gn);ud=i(Ph,"Fine-tune on GPU"),Ph.forEach(t),qh.forEach(t),Qr.forEach(t),mr=c(n),wt=s(n,"H2",{class:!0});var ei=a(wt);Yt=s(ei,"A",{id:!0,class:!0,href:!0});var Fh=a(Yt);da=s(Fh,"SPAN",{});var Nh=a(da);b(Jn.$$.fragment,Nh),Nh.forEach(t),Fh.forEach(t),md=c(ei),la=s(ei,"SPAN",{});var Ch=a(la);fd=i(Ch,"Multilingual Models"),Ch.forEach(t),ei.forEach(t),fr=c(n),Se=s(n,"UL",{});var qn=a(Se);Yn=s(qn,"LI",{});var ti=a(Yn);_d=i(ti,"All model names use the following format: "),ca=s(ti,"CODE",{});var Lh=a(ca);gd=i(Lh,"Helsinki-NLP/opus-mt-{src}-{tgt}"),Lh.forEach(t),vd=i(ti,":"),ti.forEach(t),kd=c(qn),Zn=s(qn,"LI",{});var ni=a(Zn);Td=i(ni,`If a model can output multiple languages, and you should specify a language code by prepending the desired output language to the `),pa=s(ni,"CODE",{});var Ih=a(pa);Md=i(Ih,"src_text"),Ih.forEach(t),bd=i(ni,"."),ni.forEach(t),yd=c(qn),Xn=s(qn,"LI",{});var oi=a(Xn);xd=i(oi,"You can see a models\u2019s supported language codes in its model card, under target constituents, like in "),Qn=s(oi,"A",{href:!0,rel:!0});var Ah=a(Qn);wd=i(Ah,"opus-mt-en-roa"),Ah.forEach(t),$d=i(oi,"."),oi.forEach(t),zd=c(qn),eo=s(qn,"LI",{});var si=a(eo);jd=i(si,"Note that if a model is only multilingual on the source side, like "),ha=s(si,"CODE",{});var Oh=a(ha);Ed=i(Oh,"Helsinki-NLP/opus-mt-roa-en"),Oh.forEach(t),qd=i(si,`, no language codes are required.`),si.forEach(t),qn.forEach(t),_r=c(n),Zt=s(n,"P",{});var ai=a(Zt);Pd=i(ai,"New multi-lingual models from the "),to=s(ai,"A",{href:!0,rel:!0});var Sh=a(to);Fd=i(Sh,"Tatoeba-Challenge repo"),Sh.forEach(t),Nd=i(ai,` require 3 character language codes:`),ai.forEach(t),gr=c(n),b(no.$$.fragment,n),vr=c(n),vs=s(n,"P",{});var Dh=a(vs);Cd=i(Dh,"Here is the code to see all available pretrained models on the hub:"),Dh.forEach(t),kr=c(n),b(oo.$$.fragment,n),Tr=c(n),$t=s(n,"H2",{class:!0});var ri=a($t);Xt=s(ri,"A",{id:!0,class:!0,href:!0});var Hh=a(Xt);ua=s(Hh,"SPAN",{});var Uh=a(ua);b(so.$$.fragment,Uh),Uh.forEach(t),Hh.forEach(t),Ld=c(ri),ma=s(ri,"SPAN",{});var Bh=a(ma);Id=i(Bh,"Old Style Multi-Lingual Models"),Bh.forEach(t),ri.forEach(t),Mr=c(n),ks=s(n,"P",{});var Wh=a(ks);Ad=i(Wh,`These are the old style 
multi-lingual models ported from the OPUS-MT-Train repo: and the members of each language group:`),Wh.forEach(t),br=c(n),b(ao.$$.fragment,n),yr=c(n),Ts=s(n,"P",{});var Rh=a(Ts);Od=i(Rh,"Example of translating english to many romance languages, using old-style 2 character language codes"),Rh.forEach(t),xr=c(n),b(ro.$$.fragment,n),wr=c(n),zt=s(n,"H2",{class:!0});var ii=a(zt);Qt=s(ii,"A",{id:!0,class:!0,href:!0});var Vh=a(Qt);fa=s(Vh,"SPAN",{});var Kh=a(fa);b(io.$$.fragment,Kh),Kh.forEach(t),Vh.forEach(t),Sd=c(ii),_a=s(ii,"SPAN",{});var Gh=a(_a);Dd=i(Gh,"MarianConfig"),Gh.forEach(t),ii.forEach(t),$r=c(n),Ee=s(n,"DIV",{class:!0});var Pn=a(Ee);b(lo.$$.fragment,Pn),Hd=c(Pn),jt=s(Pn,"P",{});var Ss=a(jt);Ud=i(Ss,"This is the configuration class to store the configuration of a "),Ms=s(Ss,"A",{href:!0});var Jh=a(Ms);Bd=i(Jh,"MarianModel"),Jh.forEach(t),Wd=i(Ss,`. It is used to instantiate an Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Marian `),co=s(Ss,"A",{href:!0,rel:!0});var Yh=a(co);Rd=i(Yh,"Helsinki-NLP/opus-mt-en-de"),Yh.forEach(t),Vd=i(Ss," architecture."),Ss.forEach(t),Kd=c(Pn),Et=s(Pn,"P",{});var Ds=a(Et);Gd=i(Ds,"Configuration objects inherit from "),bs=s(Ds,"A",{href:!0});var Zh=a(bs);Jd=i(Zh,"PretrainedConfig"),Zh.forEach(t),Yd=i(Ds,` and can be used to control the model outputs. Read the documentation from `),ys=s(Ds,"A",{href:!0});var Xh=a(ys);Zd=i(Xh,"PretrainedConfig"),Xh.forEach(t),Xd=i(Ds," for more information."),Ds.forEach(t),Qd=c(Pn),b(en.$$.fragment,Pn),Pn.forEach(t),zr=c(n),qt=s(n,"H2",{class:!0});var di=a(qt);tn=s(di,"A",{id:!0,class:!0,href:!0});var Qh=a(tn);ga=s(Qh,"SPAN",{});var eu=a(ga);b(po.$$.fragment,eu),eu.forEach(t),Qh.forEach(t),el=c(di),va=s(di,"SPAN",{});var tu=a(va);tl=i(tu,"MarianTokenizer"),tu.forEach(t),di.forEach(t),jr=c(n),he=s(n,"DIV",{class:!0});var ct=a(he);b(ho.$$.fragment,ct),nl=c(ct),uo=s(ct,"P",{});var li=a(uo);ol=i(li,"Construct a Marian tokenizer. Based on "),mo=s(li,"A",{href:!0,rel:!0});var nu=a(mo);sl=i(nu,"SentencePiece"),nu.forEach(t),al=i(li,"."),li.forEach(t),rl=c(ct),fo=s(ct,"P",{});var ci=a(fo);il=i(ci,"This tokenizer inherits from "),xs=s(ci,"A",{href:!0});var ou=a(xs);dl=i(ou,"PreTrainedTokenizer"),ou.forEach(t),ll=i(ci,` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.`),ci.forEach(t),cl=c(ct),b(nn.$$.fragment,ct),pl=c(ct),on=s(ct,"DIV",{class:!0});var pi=a(on);b(_o.$$.fragment,pi),hl=c(pi),ka=s(pi,"P",{});var su=a(ka);ul=i(su,"Build model inputs from a sequence by appending eos_token_id."),su.forEach(t),pi.forEach(t),ct.forEach(t),Er=c(n),Pt=s(n,"H2",{class:!0});var hi=a(Pt);sn=s(hi,"A",{id:!0,class:!0,href:!0});var au=a(sn);Ta=s(au,"SPAN",{});var ru=a(Ta);b(go.$$.fragment,ru),ru.forEach(t),au.forEach(t),ml=c(hi),Ma=s(hi,"SPAN",{});var iu=a(Ma);fl=i(iu,"MarianModel"),iu.forEach(t),hi.forEach(t),qr=c(n),qe=s(n,"DIV",{class:!0});var Fn=a(qe);b(vo.$$.fragment,Fn),_l=c(Fn),ko=s(Fn,"P",{});var ui=a(ko);gl=i(ui,`The bare Marian Model outputting raw hidden-states without any specific head on top. This model inherits from `),ws=s(ui,"A",{href:!0});var du=a(ws);vl=i(du,"PreTrainedModel"),du.forEach(t),kl=i(ui,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ui.forEach(t),Tl=c(Fn),To=s(Fn,"P",{});var mi=a(To);Ml=i(mi,"This model is also a PyTorch "),Mo=s(mi,"A",{href:!0,rel:!0});var lu=a(Mo);bl=i(lu,"torch.nn.Module"),lu.forEach(t),yl=i(mi,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),mi.forEach(t),xl=c(Fn),De=s(Fn,"DIV",{class:!0});var Nn=a(De);b(bo.$$.fragment,Nn),wl=c(Nn),Ft=s(Nn,"P",{});var Hs=a(Ft);$l=i(Hs,"The "),$s=s(Hs,"A",{href:!0});var cu=a($s);zl=i(cu,"MarianModel"),cu.forEach(t),jl=i(Hs," forward method, overrides the "),ba=s(Hs,"CODE",{});var pu=a(ba);El=i(pu,"__call__"),pu.forEach(t),ql=i(Hs," special method."),Hs.forEach(t),Pl=c(Nn),b(an.$$.fragment,Nn),Fl=c(Nn),b(rn.$$.fragment,Nn),Nn.forEach(t),Fn.forEach(t),Pr=c(n),Nt=s(n,"H2",{class:!0});var fi=a(Nt);dn=s(fi,"A",{id:!0,class:!0,href:!0});var hu=a(dn);ya=s(hu,"SPAN",{});var uu=a(ya);b(yo.$$.fragment,uu),uu.forEach(t),hu.forEach(t),Nl=c(fi),xa=s(fi,"SPAN",{});var mu=a(xa);Cl=i(mu,"MarianMTModel"),mu.forEach(t),fi.forEach(t),Fr=c(n),Pe=s(n,"DIV",{class:!0});var Cn=a(Pe);b(xo.$$.fragment,Cn),Ll=c(Cn),wo=s(Cn,"P",{});var _i=a(wo);Il=i(_i,`The Marian Model with a language modeling head. Can be used for summarization. This model inherits from `),zs=s(_i,"A",{href:!0});var fu=a(zs);Al=i(fu,"PreTrainedModel"),fu.forEach(t),Ol=i(_i,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),_i.forEach(t),Sl=c(Cn),$o=s(Cn,"P",{});var gi=a($o);Dl=i(gi,"This model is also a PyTorch "),zo=s(gi,"A",{href:!0,rel:!0});var _u=a(zo);Hl=i(_u,"torch.nn.Module"),_u.forEach(t),Ul=i(gi,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),gi.forEach(t),Bl=c(Cn),_e=s(Cn,"DIV",{class:!0});var pt=a(_e);b(jo.$$.fragment,pt),Wl=c(pt),Ct=s(pt,"P",{});var Us=a(Ct);Rl=i(Us,"The "),js=s(Us,"A",{href:!0});var gu=a(js);Vl=i(gu,"MarianMTModel"),gu.forEach(t),Kl=i(Us," forward method, overrides the "),wa=s(Us,"CODE",{});var vu=a(wa);Gl=i(vu,"__call__"),vu.forEach(t),Jl=i(Us," special method."),Us.forEach(t),Yl=c(pt),b(ln.$$.fragment,pt),Zl=c(pt),Eo=s(pt,"P",{});var vi=a(Eo);Xl=i(vi,`Pytorch version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. 
Available models are listed `),qo=s(vi,"A",{href:!0,rel:!0});var ku=a(qo);Ql=i(ku,"here"),ku.forEach(t),ec=i(vi,"."),vi.forEach(t),tc=c(pt),b(cn.$$.fragment,pt),pt.forEach(t),Cn.forEach(t),Nr=c(n),Lt=s(n,"H2",{class:!0});var ki=a(Lt);pn=s(ki,"A",{id:!0,class:!0,href:!0});var Tu=a(pn);$a=s(Tu,"SPAN",{});var Mu=a($a);b(Po.$$.fragment,Mu),Mu.forEach(t),Tu.forEach(t),nc=c(ki),za=s(ki,"SPAN",{});var bu=a(za);oc=i(bu,"MarianForCausalLM"),bu.forEach(t),ki.forEach(t),Cr=c(n),It=s(n,"DIV",{class:!0});var Ti=a(It);b(Fo.$$.fragment,Ti),sc=c(Ti),hn=s(Ti,"DIV",{class:!0});var Mi=a(hn);b(No.$$.fragment,Mi),ac=c(Mi),b(un.$$.fragment,Mi),Mi.forEach(t),Ti.forEach(t),Lr=c(n),At=s(n,"H2",{class:!0});var bi=a(At);mn=s(bi,"A",{id:!0,class:!0,href:!0});var yu=a(mn);ja=s(yu,"SPAN",{});var xu=a(ja);b(Co.$$.fragment,xu),xu.forEach(t),yu.forEach(t),rc=c(bi),Ea=s(bi,"SPAN",{});var wu=a(Ea);ic=i(wu,"TFMarianModel"),wu.forEach(t),bi.forEach(t),Ir=c(n),ue=s(n,"DIV",{class:!0});var ht=a(ue);b(Lo.$$.fragment,ht),dc=c(ht),Io=s(ht,"P",{});var yi=a(Io);lc=i(yi,`The bare MARIAN Model outputting raw hidden-states without any specific head on top. This model inherits from `),Es=s(yi,"A",{href:!0});var $u=a(Es);cc=i($u,"TFPreTrainedModel"),$u.forEach(t),pc=i(yi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),yi.forEach(t),hc=c(ht),Ao=s(ht,"P",{});var xi=a(Ao);uc=i(xi,"This model is also a "),Oo=s(xi,"A",{href:!0,rel:!0});var zu=a(Oo);mc=i(zu,"tf.keras.Model"),zu.forEach(t),fc=i(xi,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),xi.forEach(t),_c=c(ht),b(fn.$$.fragment,ht),gc=c(ht),He=s(ht,"DIV",{class:!0});var Ln=a(He);b(So.$$.fragment,Ln),vc=c(Ln),Ot=s(Ln,"P",{});var Bs=a(Ot);kc=i(Bs,"The "),qs=s(Bs,"A",{href:!0});var ju=a(qs);Tc=i(ju,"TFMarianModel"),ju.forEach(t),Mc=i(Bs," forward method, overrides the "),qa=s(Bs,"CODE",{});var Eu=a(qa);bc=i(Eu,"__call__"),Eu.forEach(t),yc=i(Bs," special method."),Bs.forEach(t),xc=c(Ln),b(_n.$$.fragment,Ln),wc=c(Ln),b(gn.$$.fragment,Ln),Ln.forEach(t),ht.forEach(t),Ar=c(n),St=s(n,"H2",{class:!0});var wi=a(St);vn=s(wi,"A",{id:!0,class:!0,href:!0});var qu=a(vn);Pa=s(qu,"SPAN",{});var Pu=a(Pa);b(Do.$$.fragment,Pu),Pu.forEach(t),qu.forEach(t),$c=c(wi),Fa=s(wi,"SPAN",{});var Fu=a(Fa);zc=i(Fu,"TFMarianMTModel"),Fu.forEach(t),wi.forEach(t),Or=c(n),me=s(n,"DIV",{class:!0});var ut=a(me);b(Ho.$$.fragment,ut),jc=c(ut),Uo=s(ut,"P",{});var $i=a(Uo);Ec=i($i,`The MARIAN Model with a language modeling head. Can be used for summarization. This model inherits from `),Ps=s($i,"A",{href:!0});var Nu=a(Ps);qc=i(Nu,"TFPreTrainedModel"),Nu.forEach(t),Pc=i($i,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$i.forEach(t),Fc=c(ut),Bo=s(ut,"P",{});var zi=a(Bo);Nc=i(zi,"This model is also a "),Wo=s(zi,"A",{href:!0,rel:!0});var Cu=a(Wo);Cc=i(Cu,"tf.keras.Model"),Cu.forEach(t),Lc=i(zi,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),zi.forEach(t),Ic=c(ut),b(kn.$$.fragment,ut),Ac=c(ut),ge=s(ut,"DIV",{class:!0});var mt=a(ge);b(Ro.$$.fragment,mt),Oc=c(mt),Dt=s(mt,"P",{});var Ws=a(Dt);Sc=i(Ws,"The "),Fs=s(Ws,"A",{href:!0});var Lu=a(Fs);Dc=i(Lu,"TFMarianMTModel"),Lu.forEach(t),Hc=i(Ws," forward method, overrides the "),Na=s(Ws,"CODE",{});var Iu=a(Na);Uc=i(Iu,"__call__"),Iu.forEach(t),Bc=i(Ws," special method."),Ws.forEach(t),Wc=c(mt),b(Tn.$$.fragment,mt),Rc=c(mt),Vo=s(mt,"P",{});var ji=a(Vo);Vc=i(ji,`TF version of marian-nmt\u2019s transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available models are listed `),Ko=s(ji,"A",{href:!0,rel:!0});var Au=a(Ko);Kc=i(Au,"here"),Au.forEach(t),Gc=i(ji,"."),ji.forEach(t),Jc=c(mt),b(Mn.$$.fragment,mt),mt.forEach(t),ut.forEach(t),Sr=c(n),Ht=s(n,"H2",{class:!0});var Ei=a(Ht);bn=s(Ei,"A",{id:!0,class:!0,href:!0});var Ou=a(bn);Ca=s(Ou,"SPAN",{});var Su=a(Ca);b(Go.$$.fragment,Su),Su.forEach(t),Ou.forEach(t),Yc=c(Ei),La=s(Ei,"SPAN",{});var Du=a(La);Zc=i(Du,"FlaxMarianModel"),Du.forEach(t),Ei.forEach(t),Dr=c(n),J=s(n,"DIV",{class:!0});var We=a(J);b(Jo.$$.fragment,We),Xc=c(We),Yo=s(We,"P",{});var qi=a(Yo);Qc=i(qi,`The bare Marian Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Ns=s(qi,"A",{href:!0});var Hu=a(Ns);ep=i(Hu,"FlaxPreTrainedModel"),Hu.forEach(t),tp=i(qi,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qi.forEach(t),np=c(We),Zo=s(We,"P",{});var Pi=a(Zo);op=i(Pi,`This model is also a Flax Linen `),Xo=s(Pi,"A",{href:!0,rel:!0});var Uu=a(Xo);sp=i(Uu,"flax.nn.Module"),Uu.forEach(t),ap=i(Pi,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Pi.forEach(t),rp=c(We),Ia=s(We,"P",{});var Bu=a(Ia);ip=i(Bu,"Finally, this model supports inherent JAX features such as:"),Bu.forEach(t),dp=c(We),dt=s(We,"UL",{});var In=a(dt);Aa=s(In,"LI",{});var Wu=a(Aa);Qo=s(Wu,"A",{href:!0,rel:!0});var Ru=a(Qo);lp=i(Ru,"Just-In-Time (JIT) compilation"),Ru.forEach(t),Wu.forEach(t),cp=c(In),Oa=s(In,"LI",{});var Vu=a(Oa);es=s(Vu,"A",{href:!0,rel:!0});var Ku=a(es);pp=i(Ku,"Automatic Differentiation"),Ku.forEach(t),Vu.forEach(t),hp=c(In),Sa=s(In,"LI",{});var Gu=a(Sa);ts=s(Gu,"A",{href:!0,rel:!0});var Ju=a(ts);up=i(Ju,"Vectorization"),Ju.forEach(t),Gu.forEach(t),mp=c(In),Da=s(In,"LI",{});var Yu=a(Da);ns=s(Yu,"A",{href:!0,rel:!0});var Zu=a(ns);fp=i(Zu,"Parallelization"),Zu.forEach(t),Yu.forEach(t),In.forEach(t),_p=c(We),Ue=s(We,"DIV",{class:!0});var An=a(Ue);b(os.$$.fragment,An),gp=c(An),Ut=s(An,"P",{});var Rs=a(Ut);vp=i(Rs,"The "),Ha=s(Rs,"CODE",{});var Xu=a(Ha);kp=i(Xu,"FlaxMarianPreTrainedModel"),Xu.forEach(t),Tp=i(Rs," forward method, overrides the "),Ua=s(Rs,"CODE",{});var Qu=a(Ua);Mp=i(Qu,"__call__"),Qu.forEach(t),bp=i(Rs," special method."),Rs.forEach(t),yp=c(An),b(yn.$$.fragment,An),xp=c(An),b(xn.$$.fragment,An),An.forEach(t),We.forEach(t),Hr=c(n),Bt=s(n,"H2",{class:!0});var Fi=a(Bt);wn=s(Fi,"A",{id:!0,class:!0,href:!0});var em=a(wn);Ba=s(em,"SPAN",{});var tm=a(Ba);b(ss.$$.fragment,tm),tm.forEach(t),em.forEach(t),wp=c(Fi),Wa=s(Fi,"SPAN",{});var nm=a(Wa);$p=i(nm,"FlaxMarianMTModel"),nm.forEach(t),Fi.forEach(t),Ur=c(n),Y=s(n,"DIV",{class:!0});var Re=a(Y);b(as.$$.fragment,Re),zp=c(Re),rs=s(Re,"P",{});var Ni=a(rs);jp=i(Ni,`The MARIAN Model with a language modeling head. Can be used for translation. This model inherits from `),Cs=s(Ni,"A",{href:!0});var om=a(Cs);Ep=i(om,"FlaxPreTrainedModel"),om.forEach(t),qp=i(Ni,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ni.forEach(t),Pp=c(Re),is=s(Re,"P",{});var Ci=a(is);Fp=i(Ci,`This model is also a Flax Linen `),ds=s(Ci,"A",{href:!0,rel:!0});var sm=a(ds);Np=i(sm,"flax.nn.Module"),sm.forEach(t),Cp=i(Ci,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Ci.forEach(t),Lp=c(Re),Ra=s(Re,"P",{});var am=a(Ra);Ip=i(am,"Finally, this model supports inherent JAX features such as:"),am.forEach(t),Ap=c(Re),lt=s(Re,"UL",{});var On=a(lt);Va=s(On,"LI",{});var rm=a(Va);ls=s(rm,"A",{href:!0,rel:!0});var im=a(ls);Op=i(im,"Just-In-Time (JIT) compilation"),im.forEach(t),rm.forEach(t),Sp=c(On),Ka=s(On,"LI",{});var dm=a(Ka);cs=s(dm,"A",{href:!0,rel:!0});var lm=a(cs);Dp=i(lm,"Automatic Differentiation"),lm.forEach(t),dm.forEach(t),Hp=c(On),Ga=s(On,"LI",{});var cm=a(Ga);ps=s(cm,"A",{href:!0,rel:!0});var pm=a(ps);Up=i(pm,"Vectorization"),pm.forEach(t),cm.forEach(t),Bp=c(On),Ja=s(On,"LI",{});var hm=a(Ja);hs=s(hm,"A",{href:!0,rel:!0});var um=a(hs);Wp=i(um,"Parallelization"),um.forEach(t),hm.forEach(t),On.forEach(t),Rp=c(Re),Be=s(Re,"DIV",{class:!0});var Sn=a(Be);b(us.$$.fragment,Sn),Vp=c(Sn),Wt=s(Sn,"P",{});var Vs=a(Wt);Kp=i(Vs,"The "),Ya=s(Vs,"CODE",{});var mm=a(Ya);Gp=i(mm,"FlaxMarianPreTrainedModel"),mm.forEach(t),Jp=i(Vs," forward method, overrides the "),Za=s(Vs,"CODE",{});var fm=a(Za);Yp=i(fm,"__call__"),fm.forEach(t),Zp=i(Vs," special method."),Vs.forEach(t),Xp=c(Sn),b($n.$$.fragment,Sn),Qp=c(Sn),b(zn.$$.fragment,Sn),Sn.forEach(t),Re.forEach(t),this.h()},h(){p(h,"name","hf:doc:metadata"),p(h,"content",JSON.stringify(Sm)),p(_,"id","marianmt"),p(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(_,"href","#marianmt"),p(g,"class","relative group"),p(Q,"href","https://github.com/huggingface/transformers/issues/new?assignees=sshleifer&labels=&template=bug-report.md&title"),p(Q,"rel","nofollow"),p(X,"id","implementation-notes"),p(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(X,"href","#implementation-notes"),p(L,"class","relative group"),p(U,"href","https://huggingface.co/Helsinki-NLP"),p(U,"rel","nofollow"),p(se,"href","https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann"),p(se,"rel","nofollow"),p(re,"href","https://marian-nmt.github.io/"),p(re,"rel","nofollow"),p(Ae,"href","/docs/transformers/pr_19429/en/model_doc/bart#transformers.BartForConditionalGeneration"),p(Bn,"href","https://huggingface.co/sshleifer"),p(Bn,"rel","nofollow"),p(Kt,"id","naming"),p(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Kt,"href","#naming"),p(bt,"class","relative group"),p(Vn,"href","https://developers.google.com/admin-sdk/directory/v1/languages"),p(Vn,"rel","nofollow"),p(Gt,"id","examples"),p(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Gt,"href","#examples"),p(xt,"class","relative group"),p(Gn,"href","https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/train_distil_marian_enro.sh"),p(Gn,"rel","nofollow"),p(Yt,"id","multilingual-models"),p(Yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Yt,"href","#multilingual-models"),p(wt,"class","relative 
group"),p(Qn,"href","https://huggingface.co/Helsinki-NLP/opus-mt-en-roa"),p(Qn,"rel","nofollow"),p(to,"href","https://github.com/Helsinki-NLP/Tatoeba-Challenge"),p(to,"rel","nofollow"),p(Xt,"id","old-style-multilingual-models"),p(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Xt,"href","#old-style-multilingual-models"),p($t,"class","relative group"),p(Qt,"id","transformers.MarianConfig"),p(Qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(Qt,"href","#transformers.MarianConfig"),p(zt,"class","relative group"),p(Ms,"href","/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianModel"),p(co,"href","https://huggingface.co/Helsinki-NLP/opus-mt-en-de"),p(co,"rel","nofollow"),p(bs,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),p(ys,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),p(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(tn,"id","transformers.MarianTokenizer"),p(tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(tn,"href","#transformers.MarianTokenizer"),p(qt,"class","relative group"),p(mo,"href","https://github.com/google/sentencepiece"),p(mo,"rel","nofollow"),p(xs,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),p(on,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(sn,"id","transformers.MarianModel"),p(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(sn,"href","#transformers.MarianModel"),p(Pt,"class","relative group"),p(ws,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),p(Mo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(Mo,"rel","nofollow"),p($s,"href","/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianModel"),p(De,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(dn,"id","transformers.MarianMTModel"),p(dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(dn,"href","#transformers.MarianMTModel"),p(Nt,"class","relative group"),p(zs,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),p(zo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),p(zo,"rel","nofollow"),p(js,"href","/docs/transformers/pr_19429/en/model_doc/marian#transformers.MarianMTModel"),p(qo,"href","https://huggingface.co/models?search=Helsinki-NLP"),p(qo,"rel","nofollow"),p(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(Pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl 
mb-6 mt-8"),p(pn,"id","transformers.MarianForCausalLM"),p(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(pn,"href","#transformers.MarianForCausalLM"),p(Lt,"class","relative group"),p(hn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(It,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(mn,"id","transformers.TFMarianModel"),p(mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(mn,"href","#transformers.TFMarianModel"),p(At,"class","relative group"),p(Es,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),p(Oo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Oo,"rel","nofollow"),p(qs,"href","/docs/transformers/pr_19429/en/model_doc/marian#transformers.TFMarianModel"),p(He,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(vn,"id","transformers.TFMarianMTModel"),p(vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(vn,"href","#transformers.TFMarianMTModel"),p(St,"class","relative group"),p(Ps,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),p(Wo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),p(Wo,"rel","nofollow"),p(Fs,"href","/docs/transformers/pr_19429/en/model_doc/marian#transformers.TFMarianMTModel"),p(Ko,"href","https://huggingface.co/models?search=Helsinki-NLP"),p(Ko,"rel","nofollow"),p(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(bn,"id","transformers.FlaxMarianModel"),p(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(bn,"href","#transformers.FlaxMarianModel"),p(Ht,"class","relative group"),p(Ns,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),p(Xo,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),p(Xo,"rel","nofollow"),p(Qo,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),p(Qo,"rel","nofollow"),p(es,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),p(es,"rel","nofollow"),p(ts,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),p(ts,"rel","nofollow"),p(ns,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),p(ns,"rel","nofollow"),p(Ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(wn,"id","transformers.FlaxMarianMTModel"),p(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),p(wn,"href","#transformers.FlaxMarianMTModel"),p(Bt,"class","relative group"),p(Cs,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),p(ds,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),p(ds,"rel","nofollow"),p(ls,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),p(ls,"rel","nofollow"),p(cs,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),p(cs,"rel","nofollow"),p(ps,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),p(ps,"rel","nofollow"),p(hs,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),p(hs,"rel","nofollow"),p(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),p(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(n,m){e(document.head,h),f(n,k,m),f(n,g,m),e(g,_),e(_,v),y(d,v,null),e(g,u),e(g,q),e(q,Ve),f(n,Ne,m),f(n,C,m),e(C,ve),e(ve,ke),e(C,z),e(C,Q),e(Q,ee),e(C,Ke),f(n,Ce,m),f(n,R,m),e(R,Ge),f(n,Le,m),f(n,L,m),e(L,X),e(X,Te),y(H,Te,null),e(L,Je),e(L,Me),e(Me,te),f(n,Ie,m),f(n,F,m),e(F,ne),e(ne,N),e(N,I),e(F,Ye),e(F,V),e(V,oe),e(oe,Ze),e(oe,U),e(U,Xe),e(oe,Qe),e(F,A),e(F,be),e(be,O),e(O,et),e(O,se),e(se,ae),e(O,tt),e(O,re),e(re,K),e(O,nt),e(F,ie),e(F,ye),e(ye,de),e(de,S),e(F,ot),e(F,D),e(D,xe),e(xe,st),e(F,T),e(F,j),e(j,we),e(we,$e),e(we,Ae),e(Ae,ft),e(we,P),e(j,_t),e(j,le),e(le,ze),e(ze,gt),e(ze,at),e(at,G),e(ze,ce),e(le,vt),e(le,je),e(je,pe),e(je,rt),e(rt,kt),e(je,it),e(le,Tt),e(le,B),e(B,Mt),e(B,Ks),e(Ks,Li),e(B,Ii),e(B,Gs),e(Gs,Ai),e(B,Oi),e(F,Si),e(F,Js),e(Js,Hn),e(Hn,Di),e(Hn,Ys),e(Ys,Hi),e(Hn,Ui),e(F,Bi),e(F,Zs),e(Zs,Un),e(Un,Wi),e(Un,Bn),e(Bn,Ri),e(Un,Vi),f(n,cr,m),f(n,bt,m),e(bt,Kt),e(Kt,Xs),y(Wn,Xs,null),e(bt,Ki),e(bt,Qs),e(Qs,Gi),f(n,pr,m),f(n,Oe,m),e(Oe,gs),e(gs,Ji),e(gs,ea),e(ea,Yi),e(Oe,Zi),e(Oe,Rn),e(Rn,Xi),e(Rn,Vn),e(Vn,Qi),e(Rn,ed),e(Oe,td),e(Oe,yt),e(yt,nd),e(yt,ta),e(ta,od),e(yt,sd),e(yt,na),e(na,ad),e(yt,rd),e(Oe,id),e(Oe,oa),e(oa,dd),f(n,hr,m),f(n,xt,m),e(xt,Gt),e(Gt,sa),y(Kn,sa,null),e(xt,ld),e(xt,aa),e(aa,cd),f(n,ur,m),f(n,Jt,m),e(Jt,ra),e(ra,pd),e(Jt,hd),e(Jt,ia),e(ia,Gn),e(Gn,ud),f(n,mr,m),f(n,wt,m),e(wt,Yt),e(Yt,da),y(Jn,da,null),e(wt,md),e(wt,la),e(la,fd),f(n,fr,m),f(n,Se,m),e(Se,Yn),e(Yn,_d),e(Yn,ca),e(ca,gd),e(Yn,vd),e(Se,kd),e(Se,Zn),e(Zn,Td),e(Zn,pa),e(pa,Md),e(Zn,bd),e(Se,yd),e(Se,Xn),e(Xn,xd),e(Xn,Qn),e(Qn,wd),e(Xn,$d),e(Se,zd),e(Se,eo),e(eo,jd),e(eo,ha),e(ha,Ed),e(eo,qd),f(n,_r,m),f(n,Zt,m),e(Zt,Pd),e(Zt,to),e(to,Fd),e(Zt,Nd),f(n,gr,m),y(no,n,m),f(n,vr,m),f(n,vs,m),e(vs,Cd),f(n,kr,m),y(oo,n,m),f(n,Tr,m),f(n,$t,m),e($t,Xt),e(Xt,ua),y(so,ua,null),e($t,Ld),e($t,ma),e(ma,Id),f(n,Mr,m),f(n,ks,m),e(ks,Ad),f(n,br,m),y(ao,n,m),f(n,yr,m),f(n,Ts,m),e(Ts,Od),f(n,xr,m),y(ro,n,m),f(n,wr,m),f(n,zt,m),e(zt,Qt),e(Qt,fa),y(io,fa,null),e(zt,Sd),e(zt,_a),e(_a,Dd),f(n,$r,m),f(n,Ee,m),y(lo,Ee,null),e(Ee,Hd),e(Ee,jt),e(jt,Ud),e(jt,Ms),e(Ms,Bd),e(jt,Wd),e(jt,co),e(co,Rd),e(jt,Vd),e(Ee,Kd),e(Ee,Et),e(Et,Gd),e(Et,bs),e(bs,Jd),e(Et,Yd),e(Et,ys),e(ys,Zd),e(Et,Xd),e(Ee,Qd),y(en,Ee,null),f(n,zr,m),f(n,qt,m),e(qt,tn),e(tn,ga),y(po,ga,null),e(qt,el),e(qt,va),e(va,tl),f(n,jr,m),f(n,he,m),y(ho,he,null),e(he,nl),e(he,uo),e(uo,ol),e(uo,mo),e(mo,sl),e(uo,al),e(he,rl),e(he,fo),e(fo,il),e(fo,xs),e(xs,dl),e(fo,ll),e(he,cl),y(nn,he,null),e(he,pl),e(he,on),y(_o,on,null),e(on,hl),e(on,ka),e(ka,ul),f(n,Er,m),f(n,Pt,m),e(Pt,sn),e(sn,Ta),y(go,Ta,null),e(Pt,ml),e(Pt,Ma),e(Ma,fl),f(n,qr,m
),f(n,qe,m),y(vo,qe,null),e(qe,_l),e(qe,ko),e(ko,gl),e(ko,ws),e(ws,vl),e(ko,kl),e(qe,Tl),e(qe,To),e(To,Ml),e(To,Mo),e(Mo,bl),e(To,yl),e(qe,xl),e(qe,De),y(bo,De,null),e(De,wl),e(De,Ft),e(Ft,$l),e(Ft,$s),e($s,zl),e(Ft,jl),e(Ft,ba),e(ba,El),e(Ft,ql),e(De,Pl),y(an,De,null),e(De,Fl),y(rn,De,null),f(n,Pr,m),f(n,Nt,m),e(Nt,dn),e(dn,ya),y(yo,ya,null),e(Nt,Nl),e(Nt,xa),e(xa,Cl),f(n,Fr,m),f(n,Pe,m),y(xo,Pe,null),e(Pe,Ll),e(Pe,wo),e(wo,Il),e(wo,zs),e(zs,Al),e(wo,Ol),e(Pe,Sl),e(Pe,$o),e($o,Dl),e($o,zo),e(zo,Hl),e($o,Ul),e(Pe,Bl),e(Pe,_e),y(jo,_e,null),e(_e,Wl),e(_e,Ct),e(Ct,Rl),e(Ct,js),e(js,Vl),e(Ct,Kl),e(Ct,wa),e(wa,Gl),e(Ct,Jl),e(_e,Yl),y(ln,_e,null),e(_e,Zl),e(_e,Eo),e(Eo,Xl),e(Eo,qo),e(qo,Ql),e(Eo,ec),e(_e,tc),y(cn,_e,null),f(n,Nr,m),f(n,Lt,m),e(Lt,pn),e(pn,$a),y(Po,$a,null),e(Lt,nc),e(Lt,za),e(za,oc),f(n,Cr,m),f(n,It,m),y(Fo,It,null),e(It,sc),e(It,hn),y(No,hn,null),e(hn,ac),y(un,hn,null),f(n,Lr,m),f(n,At,m),e(At,mn),e(mn,ja),y(Co,ja,null),e(At,rc),e(At,Ea),e(Ea,ic),f(n,Ir,m),f(n,ue,m),y(Lo,ue,null),e(ue,dc),e(ue,Io),e(Io,lc),e(Io,Es),e(Es,cc),e(Io,pc),e(ue,hc),e(ue,Ao),e(Ao,uc),e(Ao,Oo),e(Oo,mc),e(Ao,fc),e(ue,_c),y(fn,ue,null),e(ue,gc),e(ue,He),y(So,He,null),e(He,vc),e(He,Ot),e(Ot,kc),e(Ot,qs),e(qs,Tc),e(Ot,Mc),e(Ot,qa),e(qa,bc),e(Ot,yc),e(He,xc),y(_n,He,null),e(He,wc),y(gn,He,null),f(n,Ar,m),f(n,St,m),e(St,vn),e(vn,Pa),y(Do,Pa,null),e(St,$c),e(St,Fa),e(Fa,zc),f(n,Or,m),f(n,me,m),y(Ho,me,null),e(me,jc),e(me,Uo),e(Uo,Ec),e(Uo,Ps),e(Ps,qc),e(Uo,Pc),e(me,Fc),e(me,Bo),e(Bo,Nc),e(Bo,Wo),e(Wo,Cc),e(Bo,Lc),e(me,Ic),y(kn,me,null),e(me,Ac),e(me,ge),y(Ro,ge,null),e(ge,Oc),e(ge,Dt),e(Dt,Sc),e(Dt,Fs),e(Fs,Dc),e(Dt,Hc),e(Dt,Na),e(Na,Uc),e(Dt,Bc),e(ge,Wc),y(Tn,ge,null),e(ge,Rc),e(ge,Vo),e(Vo,Vc),e(Vo,Ko),e(Ko,Kc),e(Vo,Gc),e(ge,Jc),y(Mn,ge,null),f(n,Sr,m),f(n,Ht,m),e(Ht,bn),e(bn,Ca),y(Go,Ca,null),e(Ht,Yc),e(Ht,La),e(La,Zc),f(n,Dr,m),f(n,J,m),y(Jo,J,null),e(J,Xc),e(J,Yo),e(Yo,Qc),e(Yo,Ns),e(Ns,ep),e(Yo,tp),e(J,np),e(J,Zo),e(Zo,op),e(Zo,Xo),e(Xo,sp),e(Zo,ap),e(J,rp),e(J,Ia),e(Ia,ip),e(J,dp),e(J,dt),e(dt,Aa),e(Aa,Qo),e(Qo,lp),e(dt,cp),e(dt,Oa),e(Oa,es),e(es,pp),e(dt,hp),e(dt,Sa),e(Sa,ts),e(ts,up),e(dt,mp),e(dt,Da),e(Da,ns),e(ns,fp),e(J,_p),e(J,Ue),y(os,Ue,null),e(Ue,gp),e(Ue,Ut),e(Ut,vp),e(Ut,Ha),e(Ha,kp),e(Ut,Tp),e(Ut,Ua),e(Ua,Mp),e(Ut,bp),e(Ue,yp),y(yn,Ue,null),e(Ue,xp),y(xn,Ue,null),f(n,Hr,m),f(n,Bt,m),e(Bt,wn),e(wn,Ba),y(ss,Ba,null),e(Bt,wp),e(Bt,Wa),e(Wa,$p),f(n,Ur,m),f(n,Y,m),y(as,Y,null),e(Y,zp),e(Y,rs),e(rs,jp),e(rs,Cs),e(Cs,Ep),e(rs,qp),e(Y,Pp),e(Y,is),e(is,Fp),e(is,ds),e(ds,Np),e(is,Cp),e(Y,Lp),e(Y,Ra),e(Ra,Ip),e(Y,Ap),e(Y,lt),e(lt,Va),e(Va,ls),e(ls,Op),e(lt,Sp),e(lt,Ka),e(Ka,cs),e(cs,Dp),e(lt,Hp),e(lt,Ga),e(Ga,ps),e(ps,Up),e(lt,Bp),e(lt,Ja),e(Ja,hs),e(hs,Wp),e(Y,Rp),e(Y,Be),y(us,Be,null),e(Be,Vp),e(Be,Wt),e(Wt,Kp),e(Wt,Ya),e(Ya,Gp),e(Wt,Jp),e(Wt,Za),e(Za,Yp),e(Wt,Zp),e(Be,Xp),y($n,Be,null),e(Be,Qp),y(zn,Be,null),Br=!0},p(n,[m]){const ms={};m&2&&(ms.$$scope={dirty:m,ctx:n}),en.$set(ms);const Xa={};m&2&&(Xa.$$scope={dirty:m,ctx:n}),nn.$set(Xa);const Qa={};m&2&&(Qa.$$scope={dirty:m,ctx:n}),an.$set(Qa);const er={};m&2&&(er.$$scope={dirty:m,ctx:n}),rn.$set(er);const jn={};m&2&&(jn.$$scope={dirty:m,ctx:n}),ln.$set(jn);const tr={};m&2&&(tr.$$scope={dirty:m,ctx:n}),cn.$set(tr);const nr={};m&2&&(nr.$$scope={dirty:m,ctx:n}),un.$set(nr);const or={};m&2&&(or.$$scope={dirty:m,ctx:n}),fn.$set(or);const fs={};m&2&&(fs.$$scope={dirty:m,ctx:n}),_n.$set(fs);const sr={};m&2&&(sr.$$scope={dirty:m,ctx:n}),gn.$set(sr);const ar={};m&2&&(ar.$$scope={dirty:m,ctx:n}),kn.$set(ar);const 
rr={};m&2&&(rr.$$scope={dirty:m,ctx:n}),Tn.$set(rr);const W={};m&2&&(W.$$scope={dirty:m,ctx:n}),Mn.$set(W);const ir={};m&2&&(ir.$$scope={dirty:m,ctx:n}),yn.$set(ir);const dr={};m&2&&(dr.$$scope={dirty:m,ctx:n}),xn.$set(dr);const lr={};m&2&&(lr.$$scope={dirty:m,ctx:n}),$n.$set(lr);const _s={};m&2&&(_s.$$scope={dirty:m,ctx:n}),zn.$set(_s)},i(n){Br||(x(d.$$.fragment,n),x(H.$$.fragment,n),x(Wn.$$.fragment,n),x(Kn.$$.fragment,n),x(Jn.$$.fragment,n),x(no.$$.fragment,n),x(oo.$$.fragment,n),x(so.$$.fragment,n),x(ao.$$.fragment,n),x(ro.$$.fragment,n),x(io.$$.fragment,n),x(lo.$$.fragment,n),x(en.$$.fragment,n),x(po.$$.fragment,n),x(ho.$$.fragment,n),x(nn.$$.fragment,n),x(_o.$$.fragment,n),x(go.$$.fragment,n),x(vo.$$.fragment,n),x(bo.$$.fragment,n),x(an.$$.fragment,n),x(rn.$$.fragment,n),x(yo.$$.fragment,n),x(xo.$$.fragment,n),x(jo.$$.fragment,n),x(ln.$$.fragment,n),x(cn.$$.fragment,n),x(Po.$$.fragment,n),x(Fo.$$.fragment,n),x(No.$$.fragment,n),x(un.$$.fragment,n),x(Co.$$.fragment,n),x(Lo.$$.fragment,n),x(fn.$$.fragment,n),x(So.$$.fragment,n),x(_n.$$.fragment,n),x(gn.$$.fragment,n),x(Do.$$.fragment,n),x(Ho.$$.fragment,n),x(kn.$$.fragment,n),x(Ro.$$.fragment,n),x(Tn.$$.fragment,n),x(Mn.$$.fragment,n),x(Go.$$.fragment,n),x(Jo.$$.fragment,n),x(os.$$.fragment,n),x(yn.$$.fragment,n),x(xn.$$.fragment,n),x(ss.$$.fragment,n),x(as.$$.fragment,n),x(us.$$.fragment,n),x($n.$$.fragment,n),x(zn.$$.fragment,n),Br=!0)},o(n){w(d.$$.fragment,n),w(H.$$.fragment,n),w(Wn.$$.fragment,n),w(Kn.$$.fragment,n),w(Jn.$$.fragment,n),w(no.$$.fragment,n),w(oo.$$.fragment,n),w(so.$$.fragment,n),w(ao.$$.fragment,n),w(ro.$$.fragment,n),w(io.$$.fragment,n),w(lo.$$.fragment,n),w(en.$$.fragment,n),w(po.$$.fragment,n),w(ho.$$.fragment,n),w(nn.$$.fragment,n),w(_o.$$.fragment,n),w(go.$$.fragment,n),w(vo.$$.fragment,n),w(bo.$$.fragment,n),w(an.$$.fragment,n),w(rn.$$.fragment,n),w(yo.$$.fragment,n),w(xo.$$.fragment,n),w(jo.$$.fragment,n),w(ln.$$.fragment,n),w(cn.$$.fragment,n),w(Po.$$.fragment,n),w(Fo.$$.fragment,n),w(No.$$.fragment,n),w(un.$$.fragment,n),w(Co.$$.fragment,n),w(Lo.$$.fragment,n),w(fn.$$.fragment,n),w(So.$$.fragment,n),w(_n.$$.fragment,n),w(gn.$$.fragment,n),w(Do.$$.fragment,n),w(Ho.$$.fragment,n),w(kn.$$.fragment,n),w(Ro.$$.fragment,n),w(Tn.$$.fragment,n),w(Mn.$$.fragment,n),w(Go.$$.fragment,n),w(Jo.$$.fragment,n),w(os.$$.fragment,n),w(yn.$$.fragment,n),w(xn.$$.fragment,n),w(ss.$$.fragment,n),w(as.$$.fragment,n),w(us.$$.fragment,n),w($n.$$.fragment,n),w(zn.$$.fragment,n),Br=!1},d(n){t(h),n&&t(k),n&&t(g),$(d),n&&t(Ne),n&&t(C),n&&t(Ce),n&&t(R),n&&t(Le),n&&t(L),$(H),n&&t(Ie),n&&t(F),n&&t(cr),n&&t(bt),$(Wn),n&&t(pr),n&&t(Oe),n&&t(hr),n&&t(xt),$(Kn),n&&t(ur),n&&t(Jt),n&&t(mr),n&&t(wt),$(Jn),n&&t(fr),n&&t(Se),n&&t(_r),n&&t(Zt),n&&t(gr),$(no,n),n&&t(vr),n&&t(vs),n&&t(kr),$(oo,n),n&&t(Tr),n&&t($t),$(so),n&&t(Mr),n&&t(ks),n&&t(br),$(ao,n),n&&t(yr),n&&t(Ts),n&&t(xr),$(ro,n),n&&t(wr),n&&t(zt),$(io),n&&t($r),n&&t(Ee),$(lo),$(en),n&&t(zr),n&&t(qt),$(po),n&&t(jr),n&&t(he),$(ho),$(nn),$(_o),n&&t(Er),n&&t(Pt),$(go),n&&t(qr),n&&t(qe),$(vo),$(bo),$(an),$(rn),n&&t(Pr),n&&t(Nt),$(yo),n&&t(Fr),n&&t(Pe),$(xo),$(jo),$(ln),$(cn),n&&t(Nr),n&&t(Lt),$(Po),n&&t(Cr),n&&t(It),$(Fo),$(No),$(un),n&&t(Lr),n&&t(At),$(Co),n&&t(Ir),n&&t(ue),$(Lo),$(fn),$(So),$(_n),$(gn),n&&t(Ar),n&&t(St),$(Do),n&&t(Or),n&&t(me),$(Ho),$(kn),$(Ro),$(Tn),$(Mn),n&&t(Sr),n&&t(Ht),$(Go),n&&t(Dr),n&&t(J),$(Jo),$(os),$(yn),$(xn),n&&t(Hr),n&&t(Bt),$(ss),n&&t(Ur),n&&t(Y),$(as),$(us),$($n),$(zn)}}}const Sm={local:"marianmt",sections:[{local:"implementation-notes",title:"Implementation 
Notes"},{local:"naming",title:"Naming"},{local:"examples",title:"Examples"},{local:"multilingual-models",title:"Multilingual Models"},{local:"old-style-multilingual-models",title:"Old Style Multi-Lingual Models"},{local:"transformers.MarianConfig",title:"MarianConfig"},{local:"transformers.MarianTokenizer",title:"MarianTokenizer"},{local:"transformers.MarianModel",title:"MarianModel"},{local:"transformers.MarianMTModel",title:"MarianMTModel"},{local:"transformers.MarianForCausalLM",title:"MarianForCausalLM"},{local:"transformers.TFMarianModel",title:"TFMarianModel"},{local:"transformers.TFMarianMTModel",title:"TFMarianMTModel"},{local:"transformers.FlaxMarianModel",title:"FlaxMarianModel"},{local:"transformers.FlaxMarianMTModel",title:"FlaxMarianMTModel"}],title:"MarianMT"};function Dm(E){return Tm(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Km extends _m{constructor(h){super();gm(this,h,Dm,Om,vm,{})}}export{Km as default,Sm as metadata};
0
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/speech_to_text_2.mdx-hf-doc-builder.js
import{S as Ji,i as Gi,s as Yi,e as a,k as l,w as g,t as r,M as Ki,c as n,d as o,m as d,a as i,x as v,h as s,b as c,G as e,g as h,y as T,q as x,o as k,B as b,v as Qi,L as Hi}from"../../chunks/vendor-hf-doc-builder.js";import{T as Ri}from"../../chunks/Tip-hf-doc-builder.js";import{D as V}from"../../chunks/Docstring-hf-doc-builder.js";import{C as $r}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as ut}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Ui}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Xi(H){let m,E,u,_,w;return _=new $r({props:{code:`from transformers import Speech2Text2ForCausalLM, Speech2Text2Config # Initializing a Speech2Text2 s2t_transformer_s style configuration configuration = Speech2Text2Config() # Initializing a model from the s2t_transformer_s style configuration model = Speech2Text2ForCausalLM(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2Text2ForCausalLM, Speech2Text2Config <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Speech2Text2 s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = Speech2Text2Config() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the s2t_transformer_s style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = Speech2Text2ForCausalLM(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),{c(){m=a("p"),E=r("Example:"),u=l(),g(_.$$.fragment)},l(f){m=n(f,"P",{});var S=i(m);E=s(S,"Example:"),S.forEach(o),u=d(f),v(_.$$.fragment,f)},m(f,S){h(f,m,S),e(m,E),h(f,u,S),T(_,f,S),w=!0},p:Hi,i(f){w||(x(_.$$.fragment,f),w=!0)},o(f){k(_.$$.fragment,f),w=!1},d(f){f&&o(m),f&&o(u),b(_,f)}}}function Zi(H){let m,E,u,_,w,f,S,D;return{c(){m=a("p"),E=r(`This class method is simply calling the feature extractor `),u=a("a"),_=r("from_pretrained()"),w=r(` and the tokenizer `),f=a("code"),S=r("~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),D=r(` methods. Please refer to the docstrings of the methods above for more information.`),this.h()},l(q){m=n(q,"P",{});var $=i(m);E=s($,`This class method is simply calling the feature extractor `),u=n($,"A",{href:!0});var M=i(u);_=s(M,"from_pretrained()"),M.forEach(o),w=s($,` and the tokenizer `),f=n($,"CODE",{});var F=i(f);S=s(F,"~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),F.forEach(o),D=s($,` methods. Please refer to the docstrings of the methods above for more information.`),$.forEach(o),this.h()},h(){c(u,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained")},m(q,$){h(q,m,$),e(m,E),e(m,u),e(u,_),e(m,w),e(m,f),e(f,S),e(m,D)},d(q){q&&o(m)}}}function ec(H){let m,E,u,_,w,f,S,D;return{c(){m=a("p"),E=r("This class method is simply calling "),u=a("a"),_=r("save_pretrained()"),w=r(` and `),f=a("code"),S=r("~tokenization_utils_base.PreTrainedTokenizer.save_pretrained"),D=r(`. 
Please refer to the docstrings of the methods above for more information.`),this.h()},l(q){m=n(q,"P",{});var $=i(m);E=s($,"This class method is simply calling "),u=n($,"A",{href:!0});var M=i(u);_=s(M,"save_pretrained()"),M.forEach(o),w=s($,` and `),f=n($,"CODE",{});var F=i(f);S=s(F,"~tokenization_utils_base.PreTrainedTokenizer.save_pretrained"),F.forEach(o),D=s($,`. Please refer to the docstrings of the methods above for more information.`),$.forEach(o),this.h()},h(){c(u,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained")},m(q,$){h(q,m,$),e(m,E),e(m,u),e(u,_),e(m,w),e(m,f),e(f,S),e(m,D)},d(q){q&&o(m)}}}function tc(H){let m,E,u,_,w;return _=new $r({props:{code:`from transformers import ( SpeechEncoderDecoderModel, Speech2Text2ForCausalLM, Wav2Vec2Model, Speech2Text2Config, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Speech2Text2Tokenizer, ) from datasets import load_dataset feature_extractor = Wav2Vec2FeatureExtractor() tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de") encoder = Wav2Vec2Model(Wav2Vec2Config()) decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) # init random speech2text model model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder) model.config.pad_token_id = tokenizer.pad_token_id model.config.decoder_start_token_id = tokenizer.bos_token_id # pre-process inputs and labels ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") inputs = feature_extractor( ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ) input_values = inputs.input_values decoder_input_ids = tokenizer(ds[0]["text"], return_tensors="pt").input_ids # compute loss loss = model(inputs=input_values, labels=decoder_input_ids).loss # backprop loss loss.backward()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> SpeechEncoderDecoderModel, <span class="hljs-meta">... </span> Speech2Text2ForCausalLM, <span class="hljs-meta">... </span> Wav2Vec2Model, <span class="hljs-meta">... </span> Speech2Text2Config, <span class="hljs-meta">... </span> Wav2Vec2Config, <span class="hljs-meta">... </span> Wav2Vec2FeatureExtractor, <span class="hljs-meta">... </span> Speech2Text2Tokenizer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Speech2Text2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder = Wav2Vec2Model(Wav2Vec2Config()) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># init random speech2text model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder) <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = tokenizer.pad_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.decoder_start_token_id = tokenizer.bos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># pre-process inputs and labels</span> <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor( <span class="hljs-meta">... </span> ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;sampling_rate&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = inputs.input_values <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(inputs=input_values, labels=decoder_input_ids).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># backprop loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()`}}),{c(){m=a("p"),E=r("Example:"),u=l(),g(_.$$.fragment)},l(f){m=n(f,"P",{});var S=i(m);E=s(S,"Example:"),S.forEach(o),u=d(f),v(_.$$.fragment,f)},m(f,S){h(f,m,S),e(m,E),h(f,u,S),T(_,f,S),w=!0},p:Hi,i(f){w||(x(_.$$.fragment,f),w=!0)},o(f){k(_.$$.fragment,f),w=!1},d(f){f&&o(m),f&&o(u),b(_,f)}}}function oc(H){let m,E,u,_,w,f,S,D,q,$,M,F,Zt,Ee,yr,eo,Er,qo,J,zr,_t,Pr,jr,ze,Cr,qr,Mo,z,Mr,to,Fr,Ar,oo,Lr,Dr,gt,Wr,Ir,vt,Vr,Nr,Tt,Or,Br,ro,Rr,Ur,Fo,ne,Hr,Pe,Jr,Gr,Ao,ie,Yr,je,Kr,Qr,Lo,xt,Xr,Do,G,Ce,Zr,qe,es,ts,os,Me,rs,kt,ss,as,ns,Fe,is,Ae,cs,ls,Wo,Z,ce,so,Le,ds,ao,ps,Io,Y,hs,bt,fs,ms,St,us,_s,Vo,j,gs,wt,vs,Ts,$t,xs,ks,yt,bs,Ss,Et,ws,$s,zt,ys,Es,No,Pt,no,zs,Oo,De,Bo,jt,We,io,Ps,js,co,Cs,Ro,Ie,Uo,le,qs,Ve,Ms,Fs,Ho,ee,de,lo,Ne,As,po,Ls,Jo,W,Oe,Ds,te,Ws,Ct,Is,Vs,Be,Ns,Os,Bs,oe,Rs,qt,Us,Hs,Mt,Js,Gs,Ys,pe,Go,re,he,ho,Re,Ks,fo,Qs,Yo,P,Ue,Xs,mo,Zs,ea,He,ta,Ft,oa,ra,sa,fe,Je,aa,uo,na,ia,K,Ge,ca,_o,la,da,Ye,pa,go,ha,fa,ma,At,Ke,Ko,se,me,vo,Qe,ua,To,_a,Qo,y,Xe,ga,xo,va,Ta,A,Lt,xa,ka,Dt,ba,Sa,Wt,wa,$a,Ze,ko,ya,Ea,za,It,Pa,ja,Ca,ue,et,qa,B,Ma,bo,Fa,Aa,So,La,Da,tt,wo,Wa,Ia,Va,Na,Q,ot,Oa,$o,Ba,Ra,_e,Ua,X,rt,Ha,st,Ja,Vt,Ga,Ya,Ka,ge,Qa,ve,at,Xa,nt,Za,Nt,en,tn,on,Te,it,rn,ct,sn,Ot,an,nn,Xo,ae,xe,yo,lt,cn,Eo,ln,Zo,I,dt,dn,R,pn,Bt,hn,fn,zo,mn,un,Rt,_n,gn,vn,pt,Tn,ht,xn,kn,bn,ke,ft,Sn,be,er;return f=new ut({}),Ee=new ut({}),Le=new ut({}),De=new $r({props:{code:`import torch from transformers import Speech2Text2Processor, SpeechEncoderDecoderModel from datasets import load_dataset import soundfile as sf model = SpeechEncoderDecoderModel.from_pretrained("facebook/s2t-wav2vec2-large-en-de") processor = Speech2Text2Processor.from_pretrained("facebook/s2t-wav2vec2-large-en-de") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt") generated_ids = model.generate(inputs=inputs["input_values"], attention_mask=inputs["attention_mask"]) transcription = processor.batch_decode(generated_ids)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Speech2Text2Processor, SpeechEncoderDecoderModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>model = SpeechEncoderDecoderModel.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>) 
<span class="hljs-meta">&gt;&gt;&gt; </span>processor = Speech2Text2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], sampling_rate=<span class="hljs-number">16_000</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>generated_ids = model.generate(inputs=inputs[<span class="hljs-string">&quot;input_values&quot;</span>], attention_mask=inputs[<span class="hljs-string">&quot;attention_mask&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(generated_ids)`}}),Ie=new $r({props:{code:`from datasets import load_dataset from transformers import pipeline librispeech_en = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") asr = pipeline( "automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de", ) translation_de = asr(librispeech_en[0]["file"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>librispeech_en = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>asr = pipeline( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, <span class="hljs-meta">... </span> model=<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>, <span class="hljs-meta">... </span> feature_extractor=<span class="hljs-string">&quot;facebook/s2t-wav2vec2-large-en-de&quot;</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>translation_de = asr(librispeech_en[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;file&quot;</span>])`}}),Ne=new ut({}),Oe=new V({props:{name:"class transformers.Speech2Text2Config",anchor:"transformers.Speech2Text2Config",parameters:[{name:"vocab_size",val:" = 10000"},{name:"decoder_layers",val:" = 6"},{name:"decoder_ffn_dim",val:" = 2048"},{name:"decoder_attention_heads",val:" = 4"},{name:"decoder_layerdrop",val:" = 0.0"},{name:"use_cache",val:" = True"},{name:"activation_function",val:" = 'relu'"},{name:"d_model",val:" = 256"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"activation_dropout",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"decoder_start_token_id",val:" = 2"},{name:"classifier_dropout",val:" = 0.0"},{name:"scale_embedding",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"max_source_positions",val:" = 6000"},{name:"max_target_positions",val:" = 1024"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50265) &#x2014; Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/speech_to_text#transformers.Speech2TextModel">Speech2TextModel</a>`,name:"vocab_size"},{anchor:"transformers.Speech2Text2Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.Speech2Text2Config.decoder_layers",description:`<strong>decoder_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"decoder_layers"},{anchor:"transformers.Speech2Text2Config.decoder_attention_heads",description:`<strong>decoder_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"decoder_attention_heads"},{anchor:"transformers.Speech2Text2Config.decoder_ffn_dim",description:`<strong>decoder_ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 4096) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"decoder_ffn_dim"},{anchor:"transformers.Speech2Text2Config.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.Speech2Text2Config.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, and pooler.`,name:"dropout"},{anchor:"transformers.Speech2Text2Config.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout"},{anchor:"transformers.Speech2Text2Config.activation_dropout",description:`<strong>activation_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for activations inside the fully connected layer.`,name:"activation_dropout"},{anchor:"transformers.Speech2Text2Config.classifier_dropout",description:`<strong>classifier_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for classifier.`,name:"classifier_dropout"},{anchor:"transformers.Speech2Text2Config.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices. <a href="https://arxiv.org/abs/1909.11556%3E%60" rel="nofollow">https://arxiv.org/abs/1909.11556&gt;\`</a>__ for more details.`,name:"init_std"},{anchor:"transformers.Speech2Text2Config.decoder_layerdrop",description:`<strong>decoder_layerdrop</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The LayerDrop probability for the decoder. See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"decoder_layerdrop"},{anchor:"transformers.Speech2Text2Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"},{anchor:"transformers.Speech2Text2Config.max_source_positions",description:`<strong>max_source_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 6000) &#x2014; The maximum sequence length of log-mel filter-bank features that this model might ever be used with.`,name:"max_source_positions"},{anchor:"transformers.Speech2Text2Config.max_target_positions",description:`<strong>max_target_positions</strong> (<code>int</code>, <em>optional</em>, defaults to 1024) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_target_positions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py#L31"}}),pe=new Ui({props:{anchor:"transformers.Speech2Text2Config.example",$$slots:{default:[Xi]},$$scope:{ctx:H}}}),Re=new ut({}),Ue=new V({props:{name:"class transformers.Speech2Text2Tokenizer",anchor:"transformers.Speech2Text2Tokenizer",parameters:[{name:"vocab_file",val:""},{name:"bos_token",val:" = '<s>'"},{name:"pad_token",val:" = '<pad>'"},{name:"eos_token",val:" = '</s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"do_lower_case",val:" = False"},{name:"merges_file",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Tokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; File containing the vocabulary.`,name:"vocab_file"},{anchor:"transformers.Speech2Text2Tokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sentence token.`,name:"bos_token"},{anchor:"transformers.Speech2Text2Tokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sentence token.`,name:"eos_token"},{anchor:"transformers.Speech2Text2Tokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.Speech2Text2Tokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.</p> <p>**kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>`,name:"pad_token"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py#L73"}}),Je=new V({props:{name:"batch_decode",anchor:"transformers.Speech2Text2Tokenizer.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Tokenizer.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.Speech2Text2Tokenizer.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.Speech2Text2Tokenizer.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Speech2Text2Tokenizer.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370",returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Ge=new V({props:{name:"decode",anchor:"transformers.Speech2Text2Tokenizer.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Tokenizer.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.Speech2Text2Tokenizer.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.Speech2Text2Tokenizer.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Speech2Text2Tokenizer.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403",returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),Ke=new V({props:{name:"save_vocabulary",anchor:"transformers.Speech2Text2Tokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/tokenization_speech_to_text_2.py#L241"}}),Qe=new ut({}),Xe=new V({props:{name:"class 
transformers.Speech2Text2Processor",anchor:"transformers.Speech2Text2Processor",parameters:[{name:"feature_extractor",val:""},{name:"tokenizer",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Processor.feature_extractor",description:`<strong>feature_extractor</strong> (<code>AutoFeatureExtractor</code>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. The feature extractor is a required input.`,name:"feature_extractor"},{anchor:"transformers.Speech2Text2Processor.tokenizer",description:`<strong>tokenizer</strong> (<code>Speech2Text2Tokenizer</code>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer">Speech2Text2Tokenizer</a>. The tokenizer is a required input.`,name:"tokenizer"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L24"}}),et=new V({props:{name:"__call__",anchor:"transformers.Speech2Text2Processor.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L46"}}),ot=new V({props:{name:"from_pretrained",anchor:"transformers.Speech2Text2Processor.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Processor.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>~tokenization_utils_base.PreTrainedTokenizer.from_pretrained</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L152"}}),_e=new Ri({props:{$$slots:{default:[Zi]},$$scope:{ctx:H}}}),rt=new V({props:{name:"save_pretrained",anchor:"transformers.Speech2Text2Processor.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2Processor.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.Speech2Text2Processor.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L94"}}),ge=new Ri({props:{$$slots:{default:[ec]},$$scope:{ctx:H}}}),at=new V({props:{name:"batch_decode",anchor:"transformers.Speech2Text2Processor.batch_decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L84"}}),it=new V({props:{name:"decode",anchor:"transformers.Speech2Text2Processor.decode",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/processing_speech_to_text_2.py#L91"}}),lt=new ut({}),dt=new V({props:{name:"class transformers.Speech2Text2ForCausalLM",anchor:"transformers.Speech2Text2ForCausalLM",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.Speech2Text2ForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Config">Speech2Text2Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py#L747"}}),ft=new V({props:{name:"forward",anchor:"transformers.Speech2Text2ForCausalLM.forward",parameters:[{name:"input_ids",val:" = None"},{name:"attention_mask",val:" = None"},{name:"encoder_hidden_states",val:" = None"},{name:"encoder_attention_mask",val:" = None"},{name:"head_mask",val:" = None"},{name:"cross_attn_head_mask",val:" = None"},{name:"past_key_values",val:" = None"},{name:"inputs_embeds",val:" = None"},{name:"labels",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict",val:" = None"}],parametersDescription:[{anchor:"transformers.Speech2Text2ForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer">Speech2Text2Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder.`,name:"encoder_hidden_states"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in <code>[0, 1]</code>:`,name:"encoder_attention_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(decoder_layers, decoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). 
Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul>`,name:"use_cache"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.Speech2Text2ForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py#L778",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Config" >Speech2Text2Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or 
when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" >transformers.modeling_outputs.CausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),be=new Ui({props:{anchor:"transformers.Speech2Text2ForCausalLM.forward.example",$$slots:{default:[tc]},$$scope:{ctx:H}}}),{c(){m=a("meta"),E=l(),u=a("h1"),_=a("a"),w=a("span"),g(f.$$.fragment),S=l(),D=a("span"),q=r("Speech2Text2"),$=l(),M=a("h2"),F=a("a"),Zt=a("span"),g(Ee.$$.fragment),yr=l(),eo=a("span"),Er=r("Overview"),qo=l(),J=a("p"),zr=r("The Speech2Text2 model is used together with "),_t=a("a"),Pr=r("Wav2Vec2"),jr=r(` for Speech Translation models proposed in `),ze=a("a"),Cr=r("Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),qr=r(` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),Mo=l(),z=a("p"),Mr=r("Speech2Text2 is a "),to=a("em"),Fr=r("decoder-only"),Ar=r(" transformer model that can be used with any speech "),oo=a("em"),Lr=r("encoder-only"),Dr=r(`, such as `),gt=a("a"),Wr=r("Wav2Vec2"),Ir=r(" or "),vt=a("a"),Vr=r("HuBERT"),Nr=r(` for Speech-to-Text tasks. Please refer to the `),Tt=a("a"),Or=r("SpeechEncoderDecoder"),Br=r(" class on how to combine Speech2Text2 with any speech "),ro=a("em"),Rr=r("encoder-only"),Ur=r(` model.`),Fo=l(),ne=a("p"),Hr=r("This model was contributed by "),Pe=a("a"),Jr=r("Patrick von Platen"),Gr=r("."),Ao=l(),ie=a("p"),Yr=r("The original code can be found "),je=a("a"),Kr=r("here"),Qr=r("."),Lo=l(),xt=a("p"),Xr=r("Tips:"),Do=l(),G=a("ul"),Ce=a("li"),Zr=r(`Speech2Text2 achieves state-of-the-art results on the CoVoST Speech Translation dataset. 
For more information, see the `),qe=a("a"),es=r("official models"),ts=r(" ."),os=l(),Me=a("li"),rs=r("Speech2Text2 is always used within the "),kt=a("a"),ss=r("SpeechEncoderDecoder"),as=r(" framework."),ns=l(),Fe=a("li"),is=r("Speech2Text2\u2019s tokenizer is based on "),Ae=a("a"),cs=r("fastBPE"),ls=r("."),Wo=l(),Z=a("h2"),ce=a("a"),so=a("span"),g(Le.$$.fragment),ds=l(),ao=a("span"),ps=r("Inference"),Io=l(),Y=a("p"),hs=r("Speech2Text2\u2019s "),bt=a("a"),fs=r("SpeechEncoderDecoderModel"),ms=r(` model accepts raw waveform input values from speech and makes use of `),St=a("a"),us=r("generate()"),_s=r(` to translate the input speech autoregressively to the target language.`),Vo=l(),j=a("p"),gs=r("The "),wt=a("a"),vs=r("Wav2Vec2FeatureExtractor"),Ts=r(` class is responsible for preprocessing the input speech and `),$t=a("a"),xs=r("Speech2Text2Tokenizer"),ks=r(` decodes the generated target tokens to the target string. The `),yt=a("a"),bs=r("Speech2Text2Processor"),Ss=r(" wraps "),Et=a("a"),ws=r("Wav2Vec2FeatureExtractor"),$s=r(` and `),zt=a("a"),ys=r("Speech2Text2Tokenizer"),Es=r(` into a single instance to both extract the input features and decode the predicted token ids.`),No=l(),Pt=a("ul"),no=a("li"),zs=r("Step-by-step Speech Translation"),Oo=l(),g(De.$$.fragment),Bo=l(),jt=a("ul"),We=a("li"),io=a("p"),Ps=r("Speech Translation via Pipelines"),js=l(),co=a("p"),Cs=r("The automatic speech recognition pipeline can also be used to translate speech in just a couple lines of code"),Ro=l(),g(Ie.$$.fragment),Uo=l(),le=a("p"),qs=r("See "),Ve=a("a"),Ms=r("model hub"),Fs=r(" to look for Speech2Text2 checkpoints."),Ho=l(),ee=a("h2"),de=a("a"),lo=a("span"),g(Ne.$$.fragment),As=l(),po=a("span"),Ls=r("Speech2Text2Config"),Jo=l(),W=a("div"),g(Oe.$$.fragment),Ds=l(),te=a("p"),Ws=r("This is the configuration class to store the configuration of a "),Ct=a("a"),Is=r("Speech2Text2ForCausalLM"),Vs=r(`. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 `),Be=a("a"),Ns=r("facebook/s2t-wav2vec2-large-en-de"),Os=r(" architecture."),Bs=l(),oe=a("p"),Rs=r("Configuration objects inherit from "),qt=a("a"),Us=r("PretrainedConfig"),Hs=r(` and can be used to control the model outputs. Read the documentation from `),Mt=a("a"),Js=r("PretrainedConfig"),Gs=r(" for more information."),Ys=l(),g(pe.$$.fragment),Go=l(),re=a("h2"),he=a("a"),ho=a("span"),g(Re.$$.fragment),Ks=l(),fo=a("span"),Qs=r("Speech2TextTokenizer"),Yo=l(),P=a("div"),g(Ue.$$.fragment),Xs=l(),mo=a("p"),Zs=r("Constructs a Speech2Text2Tokenizer."),ea=l(),He=a("p"),ta=r("This tokenizer inherits from "),Ft=a("a"),oa=r("PreTrainedTokenizer"),ra=r(` which contains some of the main methods. 
Users should refer to the superclass for more information regarding such methods.`),sa=l(),fe=a("div"),g(Je.$$.fragment),aa=l(),uo=a("p"),na=r("Convert a list of lists of token ids into a list of strings by calling decode."),ia=l(),K=a("div"),g(Ge.$$.fragment),ca=l(),_o=a("p"),la=r(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),da=l(),Ye=a("p"),pa=r("Similar to doing "),go=a("code"),ha=r("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),fa=r("."),ma=l(),At=a("div"),g(Ke.$$.fragment),Ko=l(),se=a("h2"),me=a("a"),vo=a("span"),g(Qe.$$.fragment),ua=l(),To=a("span"),_a=r("Speech2Text2Processor"),Qo=l(),y=a("div"),g(Xe.$$.fragment),ga=l(),xo=a("p"),va=r(`Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor.`),Ta=l(),A=a("p"),Lt=a("a"),xa=r("Speech2Text2Processor"),ka=r(" offers all the functionalities of "),Dt=a("a"),ba=r("AutoFeatureExtractor"),Sa=r(" and "),Wt=a("a"),wa=r("Speech2Text2Tokenizer"),$a=r(`. See the `),Ze=a("a"),ko=a("strong"),ya=r("call"),Ea=r("()"),za=r(" and "),It=a("a"),Pa=r("decode()"),ja=r(" for more information."),Ca=l(),ue=a("div"),g(et.$$.fragment),qa=l(),B=a("p"),Ma=r(`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),bo=a("code"),Fa=r("__call__()"),Aa=r(` and returns its output. If used in the context `),So=a("code"),La=r("as_target_processor()"),Da=r(` this method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),tt=a("a"),wo=a("strong"),Wa=r("call"),Ia=r("()"),Va=r(`. Please refer to the doctsring of the above two methods for more information.`),Na=l(),Q=a("div"),g(ot.$$.fragment),Oa=l(),$o=a("p"),Ba=r("Instantiate a processor associated with a pretrained model."),Ra=l(),g(_e.$$.fragment),Ua=l(),X=a("div"),g(rt.$$.fragment),Ha=l(),st=a("p"),Ja=r(`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Vt=a("a"),Ga=r("from_pretrained()"),Ya=r(" method."),Ka=l(),g(ge.$$.fragment),Qa=l(),ve=a("div"),g(at.$$.fragment),Xa=l(),nt=a("p"),Za=r("This method forwards all its arguments to Speech2Text2Tokenizer\u2019s "),Nt=a("a"),en=r("batch_decode()"),tn=r(`. Please refer to the docstring of this method for more information.`),on=l(),Te=a("div"),g(it.$$.fragment),rn=l(),ct=a("p"),sn=r("This method forwards all its arguments to Speech2Text2Tokenizer\u2019s "),Ot=a("a"),an=r("decode()"),nn=r(`. Please refer to the docstring of this method for more information.`),Xo=l(),ae=a("h2"),xe=a("a"),yo=a("span"),g(lt.$$.fragment),cn=l(),Eo=a("span"),ln=r("Speech2Text2ForCausalLM"),Zo=l(),I=a("div"),g(dt.$$.fragment),dn=l(),R=a("p"),pn=r("The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of "),Bt=a("a"),hn=r("EncoderDecoderModel"),fn=r(" and "),zo=a("code"),mn=r("SpeechEncoderDecoder"),un=r(`. This model inherits from `),Rt=a("a"),_n=r("PreTrainedModel"),gn=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),vn=l(),pt=a("p"),Tn=r("This model is also a PyTorch "),ht=a("a"),xn=r("torch.nn.Module"),kn=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bn=l(),ke=a("div"),g(ft.$$.fragment),Sn=l(),g(be.$$.fragment),this.h()},l(t){const p=Ki('[data-svelte="svelte-1phssyn"]',document.head);m=n(p,"META",{name:!0,content:!0}),p.forEach(o),E=d(t),u=n(t,"H1",{class:!0});var mt=i(u);_=n(mt,"A",{id:!0,class:!0,href:!0});var Po=i(_);w=n(Po,"SPAN",{});var jo=i(w);v(f.$$.fragment,jo),jo.forEach(o),Po.forEach(o),S=d(mt),D=n(mt,"SPAN",{});var Co=i(D);q=s(Co,"Speech2Text2"),Co.forEach(o),mt.forEach(o),$=d(t),M=n(t,"H2",{class:!0});var tr=i(M);F=n(tr,"A",{id:!0,class:!0,href:!0});var yn=i(F);Zt=n(yn,"SPAN",{});var En=i(Zt);v(Ee.$$.fragment,En),En.forEach(o),yn.forEach(o),yr=d(tr),eo=n(tr,"SPAN",{});var zn=i(eo);Er=s(zn,"Overview"),zn.forEach(o),tr.forEach(o),qo=d(t),J=n(t,"P",{});var Ut=i(J);zr=s(Ut,"The Speech2Text2 model is used together with "),_t=n(Ut,"A",{href:!0});var Pn=i(_t);Pr=s(Pn,"Wav2Vec2"),Pn.forEach(o),jr=s(Ut,` for Speech Translation models proposed in `),ze=n(Ut,"A",{href:!0,rel:!0});var jn=i(ze);Cr=s(jn,"Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),jn.forEach(o),qr=s(Ut,` by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.`),Ut.forEach(o),Mo=d(t),z=n(t,"P",{});var L=i(z);Mr=s(L,"Speech2Text2 is a "),to=n(L,"EM",{});var Cn=i(to);Fr=s(Cn,"decoder-only"),Cn.forEach(o),Ar=s(L," transformer model that can be used with any speech "),oo=n(L,"EM",{});var qn=i(oo);Lr=s(qn,"encoder-only"),qn.forEach(o),Dr=s(L,`, such as `),gt=n(L,"A",{href:!0});var Mn=i(gt);Wr=s(Mn,"Wav2Vec2"),Mn.forEach(o),Ir=s(L," or "),vt=n(L,"A",{href:!0});var Fn=i(vt);Vr=s(Fn,"HuBERT"),Fn.forEach(o),Nr=s(L,` for Speech-to-Text tasks. Please refer to the `),Tt=n(L,"A",{href:!0});var An=i(Tt);Or=s(An,"SpeechEncoderDecoder"),An.forEach(o),Br=s(L," class on how to combine Speech2Text2 with any speech "),ro=n(L,"EM",{});var Ln=i(ro);Rr=s(Ln,"encoder-only"),Ln.forEach(o),Ur=s(L,` model.`),L.forEach(o),Fo=d(t),ne=n(t,"P",{});var or=i(ne);Hr=s(or,"This model was contributed by "),Pe=n(or,"A",{href:!0,rel:!0});var Dn=i(Pe);Jr=s(Dn,"Patrick von Platen"),Dn.forEach(o),Gr=s(or,"."),or.forEach(o),Ao=d(t),ie=n(t,"P",{});var rr=i(ie);Yr=s(rr,"The original code can be found "),je=n(rr,"A",{href:!0,rel:!0});var Wn=i(je);Kr=s(Wn,"here"),Wn.forEach(o),Qr=s(rr,"."),rr.forEach(o),Lo=d(t),xt=n(t,"P",{});var In=i(xt);Xr=s(In,"Tips:"),In.forEach(o),Do=d(t),G=n(t,"UL",{});var Ht=i(G);Ce=n(Ht,"LI",{});var sr=i(Ce);Zr=s(sr,`Speech2Text2 achieves state-of-the-art results on the CoVoST Speech Translation dataset. 
For more information, see the `),qe=n(sr,"A",{href:!0,rel:!0});var Vn=i(qe);es=s(Vn,"official models"),Vn.forEach(o),ts=s(sr," ."),sr.forEach(o),os=d(Ht),Me=n(Ht,"LI",{});var ar=i(Me);rs=s(ar,"Speech2Text2 is always used within the "),kt=n(ar,"A",{href:!0});var Nn=i(kt);ss=s(Nn,"SpeechEncoderDecoder"),Nn.forEach(o),as=s(ar," framework."),ar.forEach(o),ns=d(Ht),Fe=n(Ht,"LI",{});var nr=i(Fe);is=s(nr,"Speech2Text2\u2019s tokenizer is based on "),Ae=n(nr,"A",{href:!0,rel:!0});var On=i(Ae);cs=s(On,"fastBPE"),On.forEach(o),ls=s(nr,"."),nr.forEach(o),Ht.forEach(o),Wo=d(t),Z=n(t,"H2",{class:!0});var ir=i(Z);ce=n(ir,"A",{id:!0,class:!0,href:!0});var Bn=i(ce);so=n(Bn,"SPAN",{});var Rn=i(so);v(Le.$$.fragment,Rn),Rn.forEach(o),Bn.forEach(o),ds=d(ir),ao=n(ir,"SPAN",{});var Un=i(ao);ps=s(Un,"Inference"),Un.forEach(o),ir.forEach(o),Io=d(t),Y=n(t,"P",{});var Jt=i(Y);hs=s(Jt,"Speech2Text2\u2019s "),bt=n(Jt,"A",{href:!0});var Hn=i(bt);fs=s(Hn,"SpeechEncoderDecoderModel"),Hn.forEach(o),ms=s(Jt,` model accepts raw waveform input values from speech and makes use of `),St=n(Jt,"A",{href:!0});var Jn=i(St);us=s(Jn,"generate()"),Jn.forEach(o),_s=s(Jt,` to translate the input speech autoregressively to the target language.`),Jt.forEach(o),Vo=d(t),j=n(t,"P",{});var N=i(j);gs=s(N,"The "),wt=n(N,"A",{href:!0});var Gn=i(wt);vs=s(Gn,"Wav2Vec2FeatureExtractor"),Gn.forEach(o),Ts=s(N,` class is responsible for preprocessing the input speech and `),$t=n(N,"A",{href:!0});var Yn=i($t);xs=s(Yn,"Speech2Text2Tokenizer"),Yn.forEach(o),ks=s(N,` decodes the generated target tokens to the target string. The `),yt=n(N,"A",{href:!0});var Kn=i(yt);bs=s(Kn,"Speech2Text2Processor"),Kn.forEach(o),Ss=s(N," wraps "),Et=n(N,"A",{href:!0});var Qn=i(Et);ws=s(Qn,"Wav2Vec2FeatureExtractor"),Qn.forEach(o),$s=s(N,` and `),zt=n(N,"A",{href:!0});var Xn=i(zt);ys=s(Xn,"Speech2Text2Tokenizer"),Xn.forEach(o),Es=s(N,` into a single instance to both extract the input features and decode the predicted token ids.`),N.forEach(o),No=d(t),Pt=n(t,"UL",{});var Zn=i(Pt);no=n(Zn,"LI",{});var ei=i(no);zs=s(ei,"Step-by-step Speech Translation"),ei.forEach(o),Zn.forEach(o),Oo=d(t),v(De.$$.fragment,t),Bo=d(t),jt=n(t,"UL",{});var ti=i(jt);We=n(ti,"LI",{});var cr=i(We);io=n(cr,"P",{});var oi=i(io);Ps=s(oi,"Speech Translation via Pipelines"),oi.forEach(o),js=d(cr),co=n(cr,"P",{});var ri=i(co);Cs=s(ri,"The automatic speech recognition pipeline can also be used to translate speech in just a couple lines of code"),ri.forEach(o),cr.forEach(o),ti.forEach(o),Ro=d(t),v(Ie.$$.fragment,t),Uo=d(t),le=n(t,"P",{});var lr=i(le);qs=s(lr,"See "),Ve=n(lr,"A",{href:!0,rel:!0});var si=i(Ve);Ms=s(si,"model hub"),si.forEach(o),Fs=s(lr," to look for Speech2Text2 checkpoints."),lr.forEach(o),Ho=d(t),ee=n(t,"H2",{class:!0});var dr=i(ee);de=n(dr,"A",{id:!0,class:!0,href:!0});var ai=i(de);lo=n(ai,"SPAN",{});var ni=i(lo);v(Ne.$$.fragment,ni),ni.forEach(o),ai.forEach(o),As=d(dr),po=n(dr,"SPAN",{});var ii=i(po);Ls=s(ii,"Speech2Text2Config"),ii.forEach(o),dr.forEach(o),Jo=d(t),W=n(t,"DIV",{class:!0});var Se=i(W);v(Oe.$$.fragment,Se),Ds=d(Se),te=n(Se,"P",{});var Gt=i(te);Ws=s(Gt,"This is the configuration class to store the configuration of a "),Ct=n(Gt,"A",{href:!0});var ci=i(Ct);Is=s(ci,"Speech2Text2ForCausalLM"),ci.forEach(o),Vs=s(Gt,`. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 `),Be=n(Gt,"A",{href:!0,rel:!0});var li=i(Be);Ns=s(li,"facebook/s2t-wav2vec2-large-en-de"),li.forEach(o),Os=s(Gt," architecture."),Gt.forEach(o),Bs=d(Se),oe=n(Se,"P",{});var Yt=i(oe);Rs=s(Yt,"Configuration objects inherit from "),qt=n(Yt,"A",{href:!0});var di=i(qt);Us=s(di,"PretrainedConfig"),di.forEach(o),Hs=s(Yt,` and can be used to control the model outputs. Read the documentation from `),Mt=n(Yt,"A",{href:!0});var pi=i(Mt);Js=s(pi,"PretrainedConfig"),pi.forEach(o),Gs=s(Yt," for more information."),Yt.forEach(o),Ys=d(Se),v(pe.$$.fragment,Se),Se.forEach(o),Go=d(t),re=n(t,"H2",{class:!0});var pr=i(re);he=n(pr,"A",{id:!0,class:!0,href:!0});var hi=i(he);ho=n(hi,"SPAN",{});var fi=i(ho);v(Re.$$.fragment,fi),fi.forEach(o),hi.forEach(o),Ks=d(pr),fo=n(pr,"SPAN",{});var mi=i(fo);Qs=s(mi,"Speech2TextTokenizer"),mi.forEach(o),pr.forEach(o),Yo=d(t),P=n(t,"DIV",{class:!0});var O=i(P);v(Ue.$$.fragment,O),Xs=d(O),mo=n(O,"P",{});var ui=i(mo);Zs=s(ui,"Constructs a Speech2Text2Tokenizer."),ui.forEach(o),ea=d(O),He=n(O,"P",{});var hr=i(He);ta=s(hr,"This tokenizer inherits from "),Ft=n(hr,"A",{href:!0});var _i=i(Ft);oa=s(_i,"PreTrainedTokenizer"),_i.forEach(o),ra=s(hr,` which contains some of the main methods. Users should refer to the superclass for more information regarding such methods.`),hr.forEach(o),sa=d(O),fe=n(O,"DIV",{class:!0});var fr=i(fe);v(Je.$$.fragment,fr),aa=d(fr),uo=n(fr,"P",{});var gi=i(uo);na=s(gi,"Convert a list of lists of token ids into a list of strings by calling decode."),gi.forEach(o),fr.forEach(o),ia=d(O),K=n(O,"DIV",{class:!0});var Kt=i(K);v(Ge.$$.fragment,Kt),ca=d(Kt),_o=n(Kt,"P",{});var vi=i(_o);la=s(vi,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),vi.forEach(o),da=d(Kt),Ye=n(Kt,"P",{});var mr=i(Ye);pa=s(mr,"Similar to doing "),go=n(mr,"CODE",{});var Ti=i(go);ha=s(Ti,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Ti.forEach(o),fa=s(mr,"."),mr.forEach(o),Kt.forEach(o),ma=d(O),At=n(O,"DIV",{class:!0});var xi=i(At);v(Ke.$$.fragment,xi),xi.forEach(o),O.forEach(o),Ko=d(t),se=n(t,"H2",{class:!0});var ur=i(se);me=n(ur,"A",{id:!0,class:!0,href:!0});var ki=i(me);vo=n(ki,"SPAN",{});var bi=i(vo);v(Qe.$$.fragment,bi),bi.forEach(o),ki.forEach(o),ua=d(ur),To=n(ur,"SPAN",{});var Si=i(To);_a=s(Si,"Speech2Text2Processor"),Si.forEach(o),ur.forEach(o),Qo=d(t),y=n(t,"DIV",{class:!0});var C=i(y);v(Xe.$$.fragment,C),ga=d(C),xo=n(C,"P",{});var wi=i(xo);va=s(wi,`Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor.`),wi.forEach(o),Ta=d(C),A=n(C,"P",{});var U=i(A);Lt=n(U,"A",{href:!0});var $i=i(Lt);xa=s($i,"Speech2Text2Processor"),$i.forEach(o),ka=s(U," offers all the functionalities of "),Dt=n(U,"A",{href:!0});var yi=i(Dt);ba=s(yi,"AutoFeatureExtractor"),yi.forEach(o),Sa=s(U," and "),Wt=n(U,"A",{href:!0});var Ei=i(Wt);wa=s(Ei,"Speech2Text2Tokenizer"),Ei.forEach(o),$a=s(U,`. 
See the `),Ze=n(U,"A",{href:!0});var wn=i(Ze);ko=n(wn,"STRONG",{});var zi=i(ko);ya=s(zi,"call"),zi.forEach(o),Ea=s(wn,"()"),wn.forEach(o),za=s(U," and "),It=n(U,"A",{href:!0});var Pi=i(It);Pa=s(Pi,"decode()"),Pi.forEach(o),ja=s(U," for more information."),U.forEach(o),Ca=d(C),ue=n(C,"DIV",{class:!0});var _r=i(ue);v(et.$$.fragment,_r),qa=d(_r),B=n(_r,"P",{});var we=i(B);Ma=s(we,`When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor\u2019s `),bo=n(we,"CODE",{});var ji=i(bo);Fa=s(ji,"__call__()"),ji.forEach(o),Aa=s(we,` and returns its output. If used in the context `),So=n(we,"CODE",{});var Ci=i(So);La=s(Ci,"as_target_processor()"),Ci.forEach(o),Da=s(we,` this method forwards all its arguments to Speech2Text2Tokenizer\u2019s `),tt=n(we,"A",{href:!0});var $n=i(tt);wo=n($n,"STRONG",{});var qi=i(wo);Wa=s(qi,"call"),qi.forEach(o),Ia=s($n,"()"),$n.forEach(o),Va=s(we,`. Please refer to the doctsring of the above two methods for more information.`),we.forEach(o),_r.forEach(o),Na=d(C),Q=n(C,"DIV",{class:!0});var Qt=i(Q);v(ot.$$.fragment,Qt),Oa=d(Qt),$o=n(Qt,"P",{});var Mi=i($o);Ba=s(Mi,"Instantiate a processor associated with a pretrained model."),Mi.forEach(o),Ra=d(Qt),v(_e.$$.fragment,Qt),Qt.forEach(o),Ua=d(C),X=n(C,"DIV",{class:!0});var Xt=i(X);v(rt.$$.fragment,Xt),Ha=d(Xt),st=n(Xt,"P",{});var gr=i(st);Ja=s(gr,`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Vt=n(gr,"A",{href:!0});var Fi=i(Vt);Ga=s(Fi,"from_pretrained()"),Fi.forEach(o),Ya=s(gr," method."),gr.forEach(o),Ka=d(Xt),v(ge.$$.fragment,Xt),Xt.forEach(o),Qa=d(C),ve=n(C,"DIV",{class:!0});var vr=i(ve);v(at.$$.fragment,vr),Xa=d(vr),nt=n(vr,"P",{});var Tr=i(nt);Za=s(Tr,"This method forwards all its arguments to Speech2Text2Tokenizer\u2019s "),Nt=n(Tr,"A",{href:!0});var Ai=i(Nt);en=s(Ai,"batch_decode()"),Ai.forEach(o),tn=s(Tr,`. Please refer to the docstring of this method for more information.`),Tr.forEach(o),vr.forEach(o),on=d(C),Te=n(C,"DIV",{class:!0});var xr=i(Te);v(it.$$.fragment,xr),rn=d(xr),ct=n(xr,"P",{});var kr=i(ct);sn=s(kr,"This method forwards all its arguments to Speech2Text2Tokenizer\u2019s "),Ot=n(kr,"A",{href:!0});var Li=i(Ot);an=s(Li,"decode()"),Li.forEach(o),nn=s(kr,`. Please refer to the docstring of this method for more information.`),kr.forEach(o),xr.forEach(o),C.forEach(o),Xo=d(t),ae=n(t,"H2",{class:!0});var br=i(ae);xe=n(br,"A",{id:!0,class:!0,href:!0});var Di=i(xe);yo=n(Di,"SPAN",{});var Wi=i(yo);v(lt.$$.fragment,Wi),Wi.forEach(o),Di.forEach(o),cn=d(br),Eo=n(br,"SPAN",{});var Ii=i(Eo);ln=s(Ii,"Speech2Text2ForCausalLM"),Ii.forEach(o),br.forEach(o),Zo=d(t),I=n(t,"DIV",{class:!0});var $e=i(I);v(dt.$$.fragment,$e),dn=d($e),R=n($e,"P",{});var ye=i(R);pn=s(ye,"The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of "),Bt=n(ye,"A",{href:!0});var Vi=i(Bt);hn=s(Vi,"EncoderDecoderModel"),Vi.forEach(o),fn=s(ye," and "),zo=n(ye,"CODE",{});var Ni=i(zo);mn=s(Ni,"SpeechEncoderDecoder"),Ni.forEach(o),un=s(ye,`. This model inherits from `),Rt=n(ye,"A",{href:!0});var Oi=i(Rt);_n=s(Oi,"PreTrainedModel"),Oi.forEach(o),gn=s(ye,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ye.forEach(o),vn=d($e),pt=n($e,"P",{});var Sr=i(pt);Tn=s(Sr,"This model is also a PyTorch "),ht=n(Sr,"A",{href:!0,rel:!0});var Bi=i(ht);xn=s(Bi,"torch.nn.Module"),Bi.forEach(o),kn=s(Sr,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sr.forEach(o),bn=d($e),ke=n($e,"DIV",{class:!0});var wr=i(ke);v(ft.$$.fragment,wr),Sn=d(wr),v(be.$$.fragment,wr),wr.forEach(o),$e.forEach(o),this.h()},h(){c(m,"name","hf:doc:metadata"),c(m,"content",JSON.stringify(rc)),c(_,"id","speech2text2"),c(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_,"href","#speech2text2"),c(u,"class","relative group"),c(F,"id","overview"),c(F,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(F,"href","#overview"),c(M,"class","relative group"),c(_t,"href","wav2vec2"),c(ze,"href","https://arxiv.org/abs/2104.06678"),c(ze,"rel","nofollow"),c(gt,"href","wav2vec2"),c(vt,"href","hubert"),c(Tt,"href","speech-encoder-decoder"),c(Pe,"href","https://huggingface.co/patrickvonplaten"),c(Pe,"rel","nofollow"),c(je,"href","https://github.com/pytorch/fairseq/blob/1f7ef9ed1e1061f8c7f88f8b94c7186834398690/fairseq/models/wav2vec/wav2vec2_asr.py#L266"),c(je,"rel","nofollow"),c(qe,"href","https://huggingface.co/models?other=speech2text2"),c(qe,"rel","nofollow"),c(kt,"href","speech-encoder-decoder"),c(Ae,"href","https://github.com/glample/fastBPE"),c(Ae,"rel","nofollow"),c(ce,"id","inference"),c(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ce,"href","#inference"),c(Z,"class","relative group"),c(bt,"href","/docs/transformers/pr_19429/en/model_doc/speech-encoder-decoder#transformers.SpeechEncoderDecoderModel"),c(St,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),c(wt,"href","/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),c($t,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),c(yt,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),c(Et,"href","/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2FeatureExtractor"),c(zt,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),c(Ve,"href","https://huggingface.co/models?filter=speech2text2"),c(Ve,"rel","nofollow"),c(de,"id","transformers.Speech2Text2Config"),c(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(de,"href","#transformers.Speech2Text2Config"),c(ee,"class","relative 
group"),c(Ct,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2ForCausalLM"),c(Be,"href","https://huggingface.co/facebook/s2t-wav2vec2-large-en-de"),c(Be,"rel","nofollow"),c(qt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),c(Mt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),c(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(he,"id","transformers.Speech2Text2Tokenizer"),c(he,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(he,"href","#transformers.Speech2Text2Tokenizer"),c(re,"class","relative group"),c(Ft,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(At,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(me,"id","transformers.Speech2Text2Processor"),c(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(me,"href","#transformers.Speech2Text2Processor"),c(se,"class","relative group"),c(Lt,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor"),c(Dt,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor"),c(Wt,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Tokenizer"),c(Ze,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.__call__"),c(It,"href","/docs/transformers/pr_19429/en/model_doc/speech_to_text_2#transformers.Speech2Text2Processor.decode"),c(tt,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),c(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Vt,"href","/docs/transformers/pr_19429/en/model_doc/trocr#transformers.TrOCRProcessor.from_pretrained"),c(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Nt,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_decode"),c(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ot,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.decode"),c(Te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(xe,"id","transformers.Speech2Text2ForCausalLM"),c(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xe,"href","#transformers.Speech2Text2ForCausalLM"),c(ae,"class","relative 
group"),c(Bt,"href","/docs/transformers/pr_19429/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel"),c(Rt,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),c(ht,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),c(ht,"rel","nofollow"),c(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(t,p){e(document.head,m),h(t,E,p),h(t,u,p),e(u,_),e(_,w),T(f,w,null),e(u,S),e(u,D),e(D,q),h(t,$,p),h(t,M,p),e(M,F),e(F,Zt),T(Ee,Zt,null),e(M,yr),e(M,eo),e(eo,Er),h(t,qo,p),h(t,J,p),e(J,zr),e(J,_t),e(_t,Pr),e(J,jr),e(J,ze),e(ze,Cr),e(J,qr),h(t,Mo,p),h(t,z,p),e(z,Mr),e(z,to),e(to,Fr),e(z,Ar),e(z,oo),e(oo,Lr),e(z,Dr),e(z,gt),e(gt,Wr),e(z,Ir),e(z,vt),e(vt,Vr),e(z,Nr),e(z,Tt),e(Tt,Or),e(z,Br),e(z,ro),e(ro,Rr),e(z,Ur),h(t,Fo,p),h(t,ne,p),e(ne,Hr),e(ne,Pe),e(Pe,Jr),e(ne,Gr),h(t,Ao,p),h(t,ie,p),e(ie,Yr),e(ie,je),e(je,Kr),e(ie,Qr),h(t,Lo,p),h(t,xt,p),e(xt,Xr),h(t,Do,p),h(t,G,p),e(G,Ce),e(Ce,Zr),e(Ce,qe),e(qe,es),e(Ce,ts),e(G,os),e(G,Me),e(Me,rs),e(Me,kt),e(kt,ss),e(Me,as),e(G,ns),e(G,Fe),e(Fe,is),e(Fe,Ae),e(Ae,cs),e(Fe,ls),h(t,Wo,p),h(t,Z,p),e(Z,ce),e(ce,so),T(Le,so,null),e(Z,ds),e(Z,ao),e(ao,ps),h(t,Io,p),h(t,Y,p),e(Y,hs),e(Y,bt),e(bt,fs),e(Y,ms),e(Y,St),e(St,us),e(Y,_s),h(t,Vo,p),h(t,j,p),e(j,gs),e(j,wt),e(wt,vs),e(j,Ts),e(j,$t),e($t,xs),e(j,ks),e(j,yt),e(yt,bs),e(j,Ss),e(j,Et),e(Et,ws),e(j,$s),e(j,zt),e(zt,ys),e(j,Es),h(t,No,p),h(t,Pt,p),e(Pt,no),e(no,zs),h(t,Oo,p),T(De,t,p),h(t,Bo,p),h(t,jt,p),e(jt,We),e(We,io),e(io,Ps),e(We,js),e(We,co),e(co,Cs),h(t,Ro,p),T(Ie,t,p),h(t,Uo,p),h(t,le,p),e(le,qs),e(le,Ve),e(Ve,Ms),e(le,Fs),h(t,Ho,p),h(t,ee,p),e(ee,de),e(de,lo),T(Ne,lo,null),e(ee,As),e(ee,po),e(po,Ls),h(t,Jo,p),h(t,W,p),T(Oe,W,null),e(W,Ds),e(W,te),e(te,Ws),e(te,Ct),e(Ct,Is),e(te,Vs),e(te,Be),e(Be,Ns),e(te,Os),e(W,Bs),e(W,oe),e(oe,Rs),e(oe,qt),e(qt,Us),e(oe,Hs),e(oe,Mt),e(Mt,Js),e(oe,Gs),e(W,Ys),T(pe,W,null),h(t,Go,p),h(t,re,p),e(re,he),e(he,ho),T(Re,ho,null),e(re,Ks),e(re,fo),e(fo,Qs),h(t,Yo,p),h(t,P,p),T(Ue,P,null),e(P,Xs),e(P,mo),e(mo,Zs),e(P,ea),e(P,He),e(He,ta),e(He,Ft),e(Ft,oa),e(He,ra),e(P,sa),e(P,fe),T(Je,fe,null),e(fe,aa),e(fe,uo),e(uo,na),e(P,ia),e(P,K),T(Ge,K,null),e(K,ca),e(K,_o),e(_o,la),e(K,da),e(K,Ye),e(Ye,pa),e(Ye,go),e(go,ha),e(Ye,fa),e(P,ma),e(P,At),T(Ke,At,null),h(t,Ko,p),h(t,se,p),e(se,me),e(me,vo),T(Qe,vo,null),e(se,ua),e(se,To),e(To,_a),h(t,Qo,p),h(t,y,p),T(Xe,y,null),e(y,ga),e(y,xo),e(xo,va),e(y,Ta),e(y,A),e(A,Lt),e(Lt,xa),e(A,ka),e(A,Dt),e(Dt,ba),e(A,Sa),e(A,Wt),e(Wt,wa),e(A,$a),e(A,Ze),e(Ze,ko),e(ko,ya),e(Ze,Ea),e(A,za),e(A,It),e(It,Pa),e(A,ja),e(y,Ca),e(y,ue),T(et,ue,null),e(ue,qa),e(ue,B),e(B,Ma),e(B,bo),e(bo,Fa),e(B,Aa),e(B,So),e(So,La),e(B,Da),e(B,tt),e(tt,wo),e(wo,Wa),e(tt,Ia),e(B,Va),e(y,Na),e(y,Q),T(ot,Q,null),e(Q,Oa),e(Q,$o),e($o,Ba),e(Q,Ra),T(_e,Q,null),e(y,Ua),e(y,X),T(rt,X,null),e(X,Ha),e(X,st),e(st,Ja),e(st,Vt),e(Vt,Ga),e(st,Ya),e(X,Ka),T(ge,X,null),e(y,Qa),e(y,ve),T(at,ve,null),e(ve,Xa),e(ve,nt),e(nt,Za),e(nt,Nt),e(Nt,en),e(nt,tn),e(y,on),e(y,Te),T(it,Te,null),e(Te,rn),e(Te,ct),e(ct,sn),e(ct,Ot),e(Ot,an),e(ct,nn),h(t,Xo,p),h(t,ae,p),e(ae,xe),e(xe,yo),T(lt,yo,null),e(ae,cn),e(ae,Eo),e(Eo,ln),h(t,Zo,p),h(t,I,p),T(dt,I,null),e(I,dn),e(I,R),e(R,pn),e(R,Bt),e(Bt,hn),e(R,fn),e(R,zo),e(zo,mn),e(R,un),e(R,Rt),e(Rt,_n),e(R,gn),e(I,vn),e(I,pt),e(pt,Tn),e(pt,ht),e(ht,xn),e(pt,kn),e(I,bn),e(I,ke),T(ft,ke,null),e(ke,Sn),T(be,ke,null),er=!0},p(t,[p]){const 
mt={};p&2&&(mt.$$scope={dirty:p,ctx:t}),pe.$set(mt);const Po={};p&2&&(Po.$$scope={dirty:p,ctx:t}),_e.$set(Po);const jo={};p&2&&(jo.$$scope={dirty:p,ctx:t}),ge.$set(jo);const Co={};p&2&&(Co.$$scope={dirty:p,ctx:t}),be.$set(Co)},i(t){er||(x(f.$$.fragment,t),x(Ee.$$.fragment,t),x(Le.$$.fragment,t),x(De.$$.fragment,t),x(Ie.$$.fragment,t),x(Ne.$$.fragment,t),x(Oe.$$.fragment,t),x(pe.$$.fragment,t),x(Re.$$.fragment,t),x(Ue.$$.fragment,t),x(Je.$$.fragment,t),x(Ge.$$.fragment,t),x(Ke.$$.fragment,t),x(Qe.$$.fragment,t),x(Xe.$$.fragment,t),x(et.$$.fragment,t),x(ot.$$.fragment,t),x(_e.$$.fragment,t),x(rt.$$.fragment,t),x(ge.$$.fragment,t),x(at.$$.fragment,t),x(it.$$.fragment,t),x(lt.$$.fragment,t),x(dt.$$.fragment,t),x(ft.$$.fragment,t),x(be.$$.fragment,t),er=!0)},o(t){k(f.$$.fragment,t),k(Ee.$$.fragment,t),k(Le.$$.fragment,t),k(De.$$.fragment,t),k(Ie.$$.fragment,t),k(Ne.$$.fragment,t),k(Oe.$$.fragment,t),k(pe.$$.fragment,t),k(Re.$$.fragment,t),k(Ue.$$.fragment,t),k(Je.$$.fragment,t),k(Ge.$$.fragment,t),k(Ke.$$.fragment,t),k(Qe.$$.fragment,t),k(Xe.$$.fragment,t),k(et.$$.fragment,t),k(ot.$$.fragment,t),k(_e.$$.fragment,t),k(rt.$$.fragment,t),k(ge.$$.fragment,t),k(at.$$.fragment,t),k(it.$$.fragment,t),k(lt.$$.fragment,t),k(dt.$$.fragment,t),k(ft.$$.fragment,t),k(be.$$.fragment,t),er=!1},d(t){o(m),t&&o(E),t&&o(u),b(f),t&&o($),t&&o(M),b(Ee),t&&o(qo),t&&o(J),t&&o(Mo),t&&o(z),t&&o(Fo),t&&o(ne),t&&o(Ao),t&&o(ie),t&&o(Lo),t&&o(xt),t&&o(Do),t&&o(G),t&&o(Wo),t&&o(Z),b(Le),t&&o(Io),t&&o(Y),t&&o(Vo),t&&o(j),t&&o(No),t&&o(Pt),t&&o(Oo),b(De,t),t&&o(Bo),t&&o(jt),t&&o(Ro),b(Ie,t),t&&o(Uo),t&&o(le),t&&o(Ho),t&&o(ee),b(Ne),t&&o(Jo),t&&o(W),b(Oe),b(pe),t&&o(Go),t&&o(re),b(Re),t&&o(Yo),t&&o(P),b(Ue),b(Je),b(Ge),b(Ke),t&&o(Ko),t&&o(se),b(Qe),t&&o(Qo),t&&o(y),b(Xe),b(et),b(ot),b(_e),b(rt),b(ge),b(at),b(it),t&&o(Xo),t&&o(ae),b(lt),t&&o(Zo),t&&o(I),b(dt),b(ft),b(be)}}}const rc={local:"speech2text2",sections:[{local:"overview",title:"Overview"},{local:"inference",title:"Inference"},{local:"transformers.Speech2Text2Config",title:"Speech2Text2Config"},{local:"transformers.Speech2Text2Tokenizer",title:"Speech2TextTokenizer"},{local:"transformers.Speech2Text2Processor",title:"Speech2Text2Processor"},{local:"transformers.Speech2Text2ForCausalLM",title:"Speech2Text2ForCausalLM"}],title:"Speech2Text2"};function sc(H){return Qi(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class pc extends Ji{constructor(m){super();Gi(this,m,sc,oc,Yi,{})}}export{pc as default,rc as metadata};
1
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/bort.mdx-hf-doc-builder.js
import{S as Ze,i as et,s as tt,e as r,k as c,w as je,t as i,M as at,c as o,d as a,m as u,a as n,x as Fe,h as s,b as l,G as t,g as f,y as Ke,L as rt,q as Qe,o as Ve,B as Xe,v as ot}from"../../chunks/vendor-hf-doc-builder.js";import{I as Ye}from"../../chunks/IconCopyLink-hf-doc-builder.js";function nt(xe){let d,M,v,w,q,T,re,N,oe,W,b,g,G,B,ne,C,ie,J,E,se,R,le,he,H,O,fe,j,$,D,ce,F,I,ue,K,p,_,pe,L,me,de,ve,y,be,z,we,ge,Ee,P,Te,k,Be,Re,Q,m,_e,A,ye,Pe,x,ke,Ae,V;return T=new Ye({}),B=new Ye({}),{c(){d=r("meta"),M=c(),v=r("h1"),w=r("a"),q=r("span"),je(T.$$.fragment),re=c(),N=r("span"),oe=i("BORT"),W=c(),b=r("h2"),g=r("a"),G=r("span"),je(B.$$.fragment),ne=c(),C=r("span"),ie=i("Overview"),J=c(),E=r("p"),se=i("The BORT model was proposed in "),R=r("a"),le=i("Optimal Subarchitecture Extraction for BERT"),he=i(` by Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the authors refer to as \u201CBort\u201D.`),H=c(),O=r("p"),fe=i("The abstract from the paper is the following:"),j=c(),$=r("p"),D=r("em"),ce=i(`We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as \u201CBort\u201D, is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large (Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.`),F=c(),I=r("p"),ue=i("Tips:"),K=c(),p=r("ul"),_=r("li"),pe=i("BORT\u2019s model architecture is based on BERT, so one can refer to "),L=r("a"),me=i("BERT\u2019s documentation page"),de=i(` for the model\u2019s API as well as usage examples.`),ve=c(),y=r("li"),be=i("BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to "),z=r("a"),we=i("RoBERTa\u2019s documentation page"),ge=i(" for the tokenizer\u2019s API as well as usage examples."),Ee=c(),P=r("li"),Te=i("BORT requires a specific fine-tuning algorithm, called "),k=r("a"),Be=i("Agora"),Re=i(` , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work.`),Q=c(),m=r("p"),_e=i("This model was contributed by "),A=r("a"),ye=i("stefan-it"),Pe=i(". 
The original code can be found "),x=r("a"),ke=i("here"),Ae=i("."),this.h()},l(e){const h=at('[data-svelte="svelte-1phssyn"]',document.head);d=o(h,"META",{name:!0,content:!0}),h.forEach(a),M=u(e),v=o(e,"H1",{class:!0});var X=n(v);w=o(X,"A",{id:!0,class:!0,href:!0});var Oe=n(w);q=o(Oe,"SPAN",{});var $e=n(q);Fe(T.$$.fragment,$e),$e.forEach(a),Oe.forEach(a),re=u(X),N=o(X,"SPAN",{});var Ie=n(N);oe=s(Ie,"BORT"),Ie.forEach(a),X.forEach(a),W=u(e),b=o(e,"H2",{class:!0});var Y=n(b);g=o(Y,"A",{id:!0,class:!0,href:!0});var Le=n(g);G=o(Le,"SPAN",{});var ze=n(G);Fe(B.$$.fragment,ze),ze.forEach(a),Le.forEach(a),ne=u(Y),C=o(Y,"SPAN",{});var Se=n(C);ie=s(Se,"Overview"),Se.forEach(a),Y.forEach(a),J=u(e),E=o(e,"P",{});var Z=n(E);se=s(Z,"The BORT model was proposed in "),R=o(Z,"A",{href:!0,rel:!0});var Ue=n(R);le=s(Ue,"Optimal Subarchitecture Extraction for BERT"),Ue.forEach(a),he=s(Z,` by Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the authors refer to as \u201CBort\u201D.`),Z.forEach(a),H=u(e),O=o(e,"P",{});var qe=n(O);fe=s(qe,"The abstract from the paper is the following:"),qe.forEach(a),j=u(e),$=o(e,"P",{});var Ne=n($);D=o(Ne,"EM",{});var Ge=n(D);ce=s(Ge,`We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as \u201CBort\u201D, is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large (Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.`),Ge.forEach(a),Ne.forEach(a),F=u(e),I=o(e,"P",{});var Ce=n(I);ue=s(Ce,"Tips:"),Ce.forEach(a),K=u(e),p=o(e,"UL",{});var S=n(p);_=o(S,"LI",{});var ee=n(_);pe=s(ee,"BORT\u2019s model architecture is based on BERT, so one can refer to "),L=o(ee,"A",{href:!0});var De=n(L);me=s(De,"BERT\u2019s documentation page"),De.forEach(a),de=s(ee,` for the model\u2019s API as well as usage examples.`),ee.forEach(a),ve=u(S),y=o(S,"LI",{});var te=n(y);be=s(te,"BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to "),z=o(te,"A",{href:!0});var Me=n(z);we=s(Me,"RoBERTa\u2019s documentation page"),Me.forEach(a),ge=s(te," for the tokenizer\u2019s API as well as usage examples."),te.forEach(a),Ee=u(S),P=o(S,"LI",{});var ae=n(P);Te=s(ae,"BORT requires a specific fine-tuning algorithm, called "),k=o(ae,"A",{href:!0,rel:!0});var We=n(k);Be=s(We,"Agora"),We.forEach(a),Re=s(ae,` , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work.`),ae.forEach(a),S.forEach(a),Q=u(e),m=o(e,"P",{});var U=n(m);_e=s(U,"This model was contributed by "),A=o(U,"A",{href:!0,rel:!0});var Je=n(A);ye=s(Je,"stefan-it"),Je.forEach(a),Pe=s(U,". 
The original code can be found "),x=o(U,"A",{href:!0,rel:!0});var He=n(x);ke=s(He,"here"),He.forEach(a),Ae=s(U,"."),U.forEach(a),this.h()},h(){l(d,"name","hf:doc:metadata"),l(d,"content",JSON.stringify(it)),l(w,"id","bort"),l(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(w,"href","#bort"),l(v,"class","relative group"),l(g,"id","overview"),l(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(g,"href","#overview"),l(b,"class","relative group"),l(R,"href","https://arxiv.org/abs/2010.10499"),l(R,"rel","nofollow"),l(L,"href","bert"),l(z,"href","roberta"),l(k,"href","https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology"),l(k,"rel","nofollow"),l(A,"href","https://huggingface.co/stefan-it"),l(A,"rel","nofollow"),l(x,"href","https://github.com/alexa/bort/"),l(x,"rel","nofollow")},m(e,h){t(document.head,d),f(e,M,h),f(e,v,h),t(v,w),t(w,q),Ke(T,q,null),t(v,re),t(v,N),t(N,oe),f(e,W,h),f(e,b,h),t(b,g),t(g,G),Ke(B,G,null),t(b,ne),t(b,C),t(C,ie),f(e,J,h),f(e,E,h),t(E,se),t(E,R),t(R,le),t(E,he),f(e,H,h),f(e,O,h),t(O,fe),f(e,j,h),f(e,$,h),t($,D),t(D,ce),f(e,F,h),f(e,I,h),t(I,ue),f(e,K,h),f(e,p,h),t(p,_),t(_,pe),t(_,L),t(L,me),t(_,de),t(p,ve),t(p,y),t(y,be),t(y,z),t(z,we),t(y,ge),t(p,Ee),t(p,P),t(P,Te),t(P,k),t(k,Be),t(P,Re),f(e,Q,h),f(e,m,h),t(m,_e),t(m,A),t(A,ye),t(m,Pe),t(m,x),t(x,ke),t(m,Ae),V=!0},p:rt,i(e){V||(Qe(T.$$.fragment,e),Qe(B.$$.fragment,e),V=!0)},o(e){Ve(T.$$.fragment,e),Ve(B.$$.fragment,e),V=!1},d(e){a(d),e&&a(M),e&&a(v),Xe(T),e&&a(W),e&&a(b),Xe(B),e&&a(J),e&&a(E),e&&a(H),e&&a(O),e&&a(j),e&&a($),e&&a(F),e&&a(I),e&&a(K),e&&a(p),e&&a(Q),e&&a(m)}}}const it={local:"bort",sections:[{local:"overview",title:"Overview"}],title:"BORT"};function st(xe){return ot(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ft extends Ze{constructor(d){super();et(this,d,st,nt,tt,{})}}export{ft as default,it as metadata};
2
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/cpm.mdx-hf-doc-builder.js
import{S as fa,i as ca,s as ga,e as n,k as h,w as I,t as m,M as da,c as r,d as a,m as p,a as o,x as J,h as u,b as i,G as t,g as l,y as F,L as va,q as Y,o as D,B as q,v as wa}from"../../chunks/vendor-hf-doc-builder.js";import{D as ua}from"../../chunks/Docstring-hf-doc-builder.js";import{I as Ce}from"../../chunks/IconCopyLink-hf-doc-builder.js";function Pa(Re){let f,te,c,C,R,T,ye,W,_e,ne,g,y,X,z,ke,B,$e,re,_,Te,E,ze,Ee,oe,N,Me,ie,H,O,Le,se,d,xe,M,Ge,Ae,L,Se,le,Z,Ne,he,v,k,Q,x,He,j,Ze,pe,w,G,Ie,K,Je,me,P,$,V,A,Fe,U,Ye,ue,b,S,De,ee,qe,fe;return T=new Ce({}),z=new Ce({}),x=new Ce({}),G=new ua({props:{name:"class transformers.CpmTokenizer",anchor:"transformers.CpmTokenizer",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/cpm/tokenization_cpm.py#L31"}}),A=new Ce({}),S=new ua({props:{name:"class transformers.CpmTokenizerFast",anchor:"transformers.CpmTokenizerFast",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/cpm/tokenization_cpm_fast.py#L34"}}),{c(){f=n("meta"),te=h(),c=n("h1"),C=n("a"),R=n("span"),I(T.$$.fragment),ye=h(),W=n("span"),_e=m("CPM"),ne=h(),g=n("h2"),y=n("a"),X=n("span"),I(z.$$.fragment),ke=h(),B=n("span"),$e=m("Overview"),re=h(),_=n("p"),Te=m("The CPM model was proposed in "),E=n("a"),ze=m("CPM: A Large-scale Generative Chinese Pre-trained Language Model"),Ee=m(` by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.`),oe=h(),N=n("p"),Me=m("The abstract from the paper is the following:"),ie=h(),H=n("p"),O=n("em"),Le=m(`Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3, with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation, cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many NLP tasks in the settings of few-shot (even zero-shot) learning.`),se=h(),d=n("p"),xe=m("This model was contributed by "),M=n("a"),Ge=m("canwenxu"),Ae=m(`. The original implementation can be found here: `),L=n("a"),Se=m("https://github.com/TsinghuaAI/CPM-Generate"),le=h(),Z=n("p"),Ne=m("Note: We only have a tokenizer here, since the model architecture is the same as GPT-2."),he=h(),v=n("h2"),k=n("a"),Q=n("span"),I(x.$$.fragment),He=h(),j=n("span"),Ze=m("CpmTokenizer"),pe=h(),w=n("div"),I(G.$$.fragment),Ie=h(),K=n("p"),Je=m("Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models."),me=h(),P=n("h2"),$=n("a"),V=n("span"),I(A.$$.fragment),Fe=h(),U=n("span"),Ye=m("CpmTokenizerFast"),ue=h(),b=n("div"),I(S.$$.fragment),De=h(),ee=n("p"),qe=m("Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."),this.h()},l(e){const s=da('[data-svelte="svelte-1phssyn"]',document.head);f=r(s,"META",{name:!0,content:!0}),s.forEach(a),te=p(e),c=r(e,"H1",{class:!0});var ce=o(c);C=r(ce,"A",{id:!0,class:!0,href:!0});var We=o(C);R=r(We,"SPAN",{});var Xe=o(R);J(T.$$.fragment,Xe),Xe.forEach(a),We.forEach(a),ye=p(ce),W=r(ce,"SPAN",{});var Be=o(W);_e=u(Be,"CPM"),Be.forEach(a),ce.forEach(a),ne=p(e),g=r(e,"H2",{class:!0});var ge=o(g);y=r(ge,"A",{id:!0,class:!0,href:!0});var Oe=o(y);X=r(Oe,"SPAN",{});var Qe=o(X);J(z.$$.fragment,Qe),Qe.forEach(a),Oe.forEach(a),ke=p(ge),B=r(ge,"SPAN",{});var je=o(B);$e=u(je,"Overview"),je.forEach(a),ge.forEach(a),re=p(e),_=r(e,"P",{});var de=o(_);Te=u(de,"The CPM model was proposed in "),E=r(de,"A",{href:!0,rel:!0});var Ke=o(E);ze=u(Ke,"CPM: A Large-scale Generative Chinese Pre-trained Language Model"),Ke.forEach(a),Ee=u(de,` by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.`),de.forEach(a),oe=p(e),N=r(e,"P",{});var Ve=o(N);Me=u(Ve,"The abstract from the paper is the following:"),Ve.forEach(a),ie=p(e),H=r(e,"P",{});var Ue=o(H);O=r(Ue,"EM",{});var ea=o(O);Le=u(ea,`Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3, with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation, cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many NLP tasks in the settings of few-shot (even zero-shot) learning.`),ea.forEach(a),Ue.forEach(a),se=p(e),d=r(e,"P",{});var ae=o(d);xe=u(ae,"This model was contributed by "),M=r(ae,"A",{href:!0,rel:!0});var aa=o(M);Ge=u(aa,"canwenxu"),aa.forEach(a),Ae=u(ae,`. The original implementation can be found here: `),L=r(ae,"A",{href:!0,rel:!0});var ta=o(L);Se=u(ta,"https://github.com/TsinghuaAI/CPM-Generate"),ta.forEach(a),ae.forEach(a),le=p(e),Z=r(e,"P",{});var na=o(Z);Ne=u(na,"Note: We only have a tokenizer here, since the model architecture is the same as GPT-2."),na.forEach(a),he=p(e),v=r(e,"H2",{class:!0});var ve=o(v);k=r(ve,"A",{id:!0,class:!0,href:!0});var ra=o(k);Q=r(ra,"SPAN",{});var oa=o(Q);J(x.$$.fragment,oa),oa.forEach(a),ra.forEach(a),He=p(ve),j=r(ve,"SPAN",{});var ia=o(j);Ze=u(ia,"CpmTokenizer"),ia.forEach(a),ve.forEach(a),pe=p(e),w=r(e,"DIV",{class:!0});var we=o(w);J(G.$$.fragment,we),Ie=p(we),K=r(we,"P",{});var sa=o(K);Je=u(sa,"Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models."),sa.forEach(a),we.forEach(a),me=p(e),P=r(e,"H2",{class:!0});var Pe=o(P);$=r(Pe,"A",{id:!0,class:!0,href:!0});var la=o($);V=r(la,"SPAN",{});var ha=o(V);J(A.$$.fragment,ha),ha.forEach(a),la.forEach(a),Fe=p(Pe),U=r(Pe,"SPAN",{});var pa=o(U);Ye=u(pa,"CpmTokenizerFast"),pa.forEach(a),Pe.forEach(a),ue=p(e),b=r(e,"DIV",{class:!0});var be=o(b);J(S.$$.fragment,be),De=p(be),ee=r(be,"P",{});var ma=o(ee);qe=u(ma,"Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."),ma.forEach(a),be.forEach(a),this.h()},h(){i(f,"name","hf:doc:metadata"),i(f,"content",JSON.stringify(ba)),i(C,"id","cpm"),i(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(C,"href","#cpm"),i(c,"class","relative group"),i(y,"id","overview"),i(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(y,"href","#overview"),i(g,"class","relative group"),i(E,"href","https://arxiv.org/abs/2012.00413"),i(E,"rel","nofollow"),i(M,"href","https://huggingface.co/canwenxu"),i(M,"rel","nofollow"),i(L,"href","https://github.com/TsinghuaAI/CPM-Generate"),i(L,"rel","nofollow"),i(k,"id","transformers.CpmTokenizer"),i(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(k,"href","#transformers.CpmTokenizer"),i(v,"class","relative group"),i(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i($,"id","transformers.CpmTokenizerFast"),i($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i($,"href","#transformers.CpmTokenizerFast"),i(P,"class","relative group"),i(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,s){t(document.head,f),l(e,te,s),l(e,c,s),t(c,C),t(C,R),F(T,R,null),t(c,ye),t(c,W),t(W,_e),l(e,ne,s),l(e,g,s),t(g,y),t(y,X),F(z,X,null),t(g,ke),t(g,B),t(B,$e),l(e,re,s),l(e,_,s),t(_,Te),t(_,E),t(E,ze),t(_,Ee),l(e,oe,s),l(e,N,s),t(N,Me),l(e,ie,s),l(e,H,s),t(H,O),t(O,Le),l(e,se,s),l(e,d,s),t(d,xe),t(d,M),t(M,Ge),t(d,Ae),t(d,L),t(L,Se),l(e,le,s),l(e,Z,s),t(Z,Ne),l(e,he,s),l(e,v,s),t(v,k),t(k,Q),F(x,Q,null),t(v,He),t(v,j),t(j,Ze),l(e,pe,s),l(e,w,s),F(G,w,null),t(w,Ie),t(w,K),t(K,Je),l(e,me,s),l(e,P,s),t(P,$),t($,V),F(A,V,null),t(P,Fe),t(P,U),t(U,Ye),l(e,ue,s),l(e,b,s),F(S,b,null),t(b,De),t(b,ee),t(ee,qe),fe=!0},p:va,i(e){fe||(Y(T.$$.fragment,e),Y(z.$$.fragment,e),Y(x.$$.fragment,e),Y(G.$$.fragment,e),Y(A.$$.fragment,e),Y(S.$$.fragment,e),fe=!0)},o(e){D(T.$$.fragment,e),D(z.$$.fragment,e),D(x.$$.fragment,e),D(G.$$.fragment,e),D(A.$$.fragment,e),D(S.$$.fragment,e),fe=!1},d(e){a(f),e&&a(te),e&&a(c),q(T),e&&a(ne),e&&a(g),q(z),e&&a(re),e&&a(_),e&&a(oe),e&&a(N),e&&a(ie),e&&a(H),e&&a(se),e&&a(d),e&&a(le),e&&a(Z),e&&a(he),e&&a(v),q(x),e&&a(pe),e&&a(w),q(G),e&&a(me),e&&a(P),q(A),e&&a(ue),e&&a(b),q(S)}}}const ba={local:"cpm",sections:[{local:"overview",title:"Overview"},{local:"transformers.CpmTokenizer",title:"CpmTokenizer"},{local:"transformers.CpmTokenizerFast",title:"CpmTokenizerFast"}],title:"CPM"};function Ca(Re){return wa(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class $a extends 
fa{constructor(f){super();ca(this,f,Ca,Pa,ga,{})}}export{$a as default,ba as metadata};
3
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/longt5.mdx-hf-doc-builder.js
import{S as Qi,i as Xi,s as el,e as r,k as u,w as k,t as a,M as tl,c as d,d as o,m as h,a as i,x as y,h as s,b as g,G as e,g as T,y as v,q as w,o as $,B as x,v as ol,L as me}from"../../chunks/vendor-hf-doc-builder.js";import{T as Tn}from"../../chunks/Tip-hf-doc-builder.js";import{D as G}from"../../chunks/Docstring-hf-doc-builder.js";import{C as X}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Ve}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as he}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function nl(z){let l,b,m,p,_;return{c(){l=r("p"),b=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),p=a("Module"),_=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=d(c,"CODE",{});var L=i(m);p=s(L,"Module"),L.forEach(o),_=s(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(o)},m(n,c){T(n,l,c),e(l,b),e(l,m),e(m,p),e(l,_)},d(n){n&&o(l)}}}function al(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, LongT5Model tokenizer = T5Tokenizer.from_pretrained("google/long-t5-local-base") model = LongT5Model.from_pretrained("google/long-t5-local-base") # Let's try a very long encoder input. input_ids = tokenizer( 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ).input_ids # Batch size 1 decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 # forward pass outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, LongT5Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongT5Model.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Let&#x27;s try a very long encoder input.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-number">100</span> * <span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... 
</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function sl(z){let l,b,m,p,_;return{c(){l=r("p"),b=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),p=a("Module"),_=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=d(c,"CODE",{});var L=i(m);p=s(L,"Module"),L.forEach(o),_=s(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(o)},m(n,c){T(n,l,c),e(l,b),e(l,m),e(m,p),e(l,_)},d(n){n&&o(l)}}}function rl(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import AutoTokenizer, LongT5ForConditionalGeneration tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") model = LongT5ForConditionalGeneration.from_pretrained( "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ) # Let's try a very long input. inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") input_ids = inputs.input_ids outputs = model.generate(input_ids) print(tokenizer.decode(outputs[0], skip_special_tokens=True))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, LongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Stancld/longt5-tglobal-large-16384-pubmed-3k_steps&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongT5ForConditionalGeneration.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Stancld/longt5-tglobal-large-16384-pubmed-3k_steps&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Let&#x27;s try a very long input.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-number">100</span> * <span class="hljs-string">&quot;studies have shown that owning a dog is good for you &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = inputs.input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)) abstractthe aim of this article <span class="hljs-keyword">is</span> to provide an overview of the literature on the role of dog`}}),{c(){l=r("p"),b=a("Examples:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Examples:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function dl(z){let l,b,m,p,_;return{c(){l=r("p"),b=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),p=a("Module"),_=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=d(c,"CODE",{});var L=i(m);p=s(L,"Module"),L.forEach(o),_=s(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(o)},m(n,c){T(n,l,c),e(l,b),e(l,m),e(m,p),e(l,_)},d(n){n&&o(l)}}}function il(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import AutoTokenizer, LongT5ForConditionalGeneration tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") input_ids = tokenizer( 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt" ).input_ids # Batch size 1 outputs = model(input_ids=input_ids) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, LongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = LongT5EncoderModel.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-number">100</span> * <span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... 
</span>).input_ids <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function ll(z){let l,b,m,p,_;return{c(){l=r("p"),b=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),p=a("Module"),_=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=d(c,"CODE",{});var L=i(m);p=s(L,"Module"),L.forEach(o),_=s(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(o)},m(n,c){T(n,l,c),e(l,b),e(l,m),e(m,p),e(l,_)},d(n){n&&o(l)}}}function cl(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5Model tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base") input_ids = tokenizer( "Studies have been shown that owning a dog is good for you", return_tensors="np" ).input_ids decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids # forward pass outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5Model <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5Model.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Studies have been shown that owning a dog is good for you&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span> <span class="hljs-meta">... 
</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = tokenizer(<span class="hljs-string">&quot;Studies show that&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function pl(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") text = "My friends are cool but they eat too many carbs." inputs = tokenizer(text, return_tensors="np") encoder_outputs = model.encode(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function ul(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5ForConditionalGeneration import jax.numpy as jnp tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, return_tensors="np") encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function hl(z){let l,b,m,p,_;return{c(){l=r("p"),b=a("Although the recipe for forward pass needs to be defined within this function, one should call the "),m=r("code"),p=a("Module"),_=a(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),m=d(c,"CODE",{});var L=i(m);p=s(L,"Module"),L.forEach(o),_=s(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(o)},m(n,c){T(n,l,c),e(l,b),e(l,m),e(m,p),e(l,_)},d(n){n&&o(l)}}}function ml(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs." 
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np") # Generate Summary summary_ids = model.generate(inputs["input_ids"]).sequences print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ARTICLE_TO_SUMMARIZE = <span class="hljs-string">&quot;summarize: My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate Summary</span> <span class="hljs-meta">&gt;&gt;&gt; </span>summary_ids = model.generate(inputs[<span class="hljs-string">&quot;input_ids&quot;</span>]).sequences <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.decode(summary_ids[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>))`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function gl(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") text = "My friends are cool but they eat too many carbs." 
inputs = tokenizer(text, return_tensors="np") encoder_outputs = model.encode(**inputs)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs)`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function fl(z){let l,b,m,p,_;return p=new X({props:{code:`from transformers import T5Tokenizer, FlaxLongT5ForConditionalGeneration import jax.numpy as jnp tokenizer = T5Tokenizer.from_pretrained("t5-base") model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") text = "summarize: My friends are cool but they eat too many carbs." inputs = tokenizer(text, return_tensors="np") encoder_outputs = model.encode(**inputs) decoder_start_token_id = model.config.decoder_start_token_id decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id outputs = model.decode(decoder_input_ids, encoder_outputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5Tokenizer, FlaxLongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> jax.numpy <span class="hljs-keyword">as</span> jnp <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = T5Tokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxLongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;google/long-t5-local-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>text = <span class="hljs-string">&quot;summarize: My friends are cool but they eat too many carbs.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(text, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_outputs = model.encode(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_start_token_id = model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>decoder_input_ids = jnp.ones((inputs.input_ids.shape[<span class="hljs-number">0</span>], <span class="hljs-number">1</span>), dtype=<span class="hljs-string">&quot;i4&quot;</span>) * decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.decode(decoder_input_ids, encoder_outputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = 
outputs.logits`}}),{c(){l=r("p"),b=a("Example:"),m=u(),k(p.$$.fragment)},l(n){l=d(n,"P",{});var c=i(l);b=s(c,"Example:"),c.forEach(o),m=h(n),y(p.$$.fragment,n)},m(n,c){T(n,l,c),e(l,b),T(n,m,c),v(p,n,c),_=!0},p:me,i(n){_||(w(p.$$.fragment,n),_=!0)},o(n){$(p.$$.fragment,n),_=!1},d(n){n&&o(l),n&&o(m),x(p,n)}}}function _l(z){let l,b,m,p,_,n,c,L,la,bn,ee,ge,_o,Ke,ca,To,pa,kn,fe,ua,Ze,ha,ma,yn,Yt,ga,vn,Rt,bo,fa,wn,Jt,_a,$n,C,O,Vt,Ta,ba,Kt,ka,ya,ko,va,wa,yo,$a,xa,vo,za,La,wo,qa,ja,Fa,Qe,Ma,Zt,Ea,Ca,Oa,Xe,Pa,$o,Ga,Sa,Aa,j,Na,xo,Ia,Da,zo,Ba,Wa,Lo,Ua,Ha,qo,Ya,Ra,jo,Ja,Va,Fo,Ka,Za,Qa,q,Mo,Xa,es,Eo,ts,os,Co,ns,as,Oo,ss,rs,Po,ds,is,Go,ls,cs,So,ps,us,hs,et,ms,tt,gs,fs,xn,ot,zn,Q,_s,nt,Ts,bs,at,ks,ys,Ln,te,_e,Ao,st,vs,No,ws,qn,K,rt,$s,Z,xs,Qt,zs,Ls,Xt,qs,js,dt,Fs,Ms,Es,oe,Cs,eo,Os,Ps,to,Gs,Ss,jn,ne,Te,Io,it,As,Do,Ns,Fn,F,lt,Is,Bo,Ds,Bs,ct,Ws,pt,Us,Hs,Ys,ut,Rs,oo,Js,Vs,Ks,ht,Zs,mt,Qs,Xs,er,D,gt,tr,ae,or,no,nr,ar,Wo,sr,rr,dr,be,ir,ke,Mn,se,ye,Uo,ft,lr,Ho,cr,En,M,_t,pr,Tt,ur,Yo,hr,mr,gr,bt,fr,kt,_r,Tr,br,yt,kr,ao,yr,vr,wr,vt,$r,wt,xr,zr,Lr,B,$t,qr,re,jr,so,Fr,Mr,Ro,Er,Cr,Or,ve,Pr,we,Cn,de,$e,Jo,xt,Gr,Vo,Sr,On,E,zt,Ar,Ko,Nr,Ir,Lt,Dr,qt,Br,Wr,Ur,jt,Hr,ro,Yr,Rr,Jr,Ft,Vr,Mt,Kr,Zr,Qr,W,Et,Xr,ie,ed,io,td,od,Zo,nd,ad,sd,xe,rd,ze,Pn,le,Le,Qo,Ct,dd,Xo,id,Gn,A,Ot,ld,U,Pt,cd,ce,pd,en,ud,hd,tn,md,gd,fd,qe,_d,je,Td,Fe,Gt,bd,Me,kd,Ee,St,yd,Ce,Sn,pe,Oe,on,At,vd,nn,wd,An,N,Nt,$d,H,It,xd,ue,zd,an,Ld,qd,sn,jd,Fd,Md,Pe,Ed,Ge,Cd,Se,Dt,Od,Ae,Pd,Ne,Bt,Gd,Ie,Nn;return n=new Ve({}),Ke=new Ve({}),ot=new X({props:{code:`import evaluate from datasets import load_dataset from transformers import AutoTokenizer, LongT5ForConditionalGeneration dataset = load_dataset("scientific_papers", "pubmed", split="validation") model = ( LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") .to("cuda") .half() ) tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") def generate_answers(batch): inputs_dict = tokenizer( batch["article"], max_length=16384, padding="max_length", truncation=True, return_tensors="pt" ) input_ids = inputs_dict.input_ids.to("cuda") attention_mask = inputs_dict.attention_mask.to("cuda") output_ids = model.generate(input_ids, attention_mask=attention_mask, max_length=512, num_beams=2) batch["predicted_abstract"] = tokenizer.batch_decode(output_ids, skip_special_tokens=True) return batch result = dataset.map(generate_answer, batched=True, batch_size=2) rouge = evaluate.load("rouge") rouge.compute(predictions=result["predicted_abstract"], references=result["abstract"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> evaluate <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, LongT5ForConditionalGeneration <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;scientific_papers&quot;</span>, <span class="hljs-string">&quot;pubmed&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ( <span class="hljs-meta">... </span> LongT5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;Stancld/longt5-tglobal-large-16384-pubmed-3k_steps&quot;</span>) <span class="hljs-meta">... 
</span> .to(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-meta">... </span> .half() <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Stancld/longt5-tglobal-large-16384-pubmed-3k_steps&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">generate_answers</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> inputs_dict = tokenizer( <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;article&quot;</span>], max_length=<span class="hljs-number">16384</span>, padding=<span class="hljs-string">&quot;max_length&quot;</span>, truncation=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> input_ids = inputs_dict.input_ids.to(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-meta">... </span> attention_mask = inputs_dict.attention_mask.to(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-meta">... </span> output_ids = model.generate(input_ids, attention_mask=attention_mask, max_length=<span class="hljs-number">512</span>, num_beams=<span class="hljs-number">2</span>) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;predicted_abstract&quot;</span>] = tokenizer.batch_decode(output_ids, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>result = dataset.<span class="hljs-built_in">map</span>(generate_answer, batched=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>rouge = evaluate.load(<span class="hljs-string">&quot;rouge&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>rouge.compute(predictions=result[<span class="hljs-string">&quot;predicted_abstract&quot;</span>], references=result[<span class="hljs-string">&quot;abstract&quot;</span>])`}}),st=new Ve({}),rt=new G({props:{name:"class transformers.LongT5Config",anchor:"transformers.LongT5Config",parameters:[{name:"vocab_size",val:" = 32128"},{name:"d_model",val:" = 512"},{name:"d_kv",val:" = 64"},{name:"d_ff",val:" = 2048"},{name:"num_layers",val:" = 6"},{name:"num_decoder_layers",val:" = None"},{name:"num_heads",val:" = 8"},{name:"local_radius",val:" = 127"},{name:"global_block_size",val:" = 16"},{name:"relative_attention_num_buckets",val:" = 32"},{name:"relative_attention_max_distance",val:" = 128"},{name:"dropout_rate",val:" = 0.1"},{name:"layer_norm_epsilon",val:" = 1e-06"},{name:"initializer_factor",val:" = 1.0"},{name:"feed_forward_proj",val:" = 'relu'"},{name:"is_encoder_decoder",val:" = True"},{name:"encoder_attention_type",val:" = 'local'"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 0"},{name:"eos_token_id",val:" = 1"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.LongT5Config.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32128) &#x2014; Vocabulary size of the LongT5 model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Model">LongT5Model</a>.`,name:"vocab_size"},{anchor:"transformers.LongT5Config.d_model",description:`<strong>d_model</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Size of the encoder layers and the pooler layer.`,name:"d_model"},{anchor:"transformers.LongT5Config.d_kv",description:`<strong>d_kv</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; Size of the key, query, value projections per attention head. <code>d_kv</code> has to be equal to <code>d_model // num_heads</code>.`,name:"d_kv"},{anchor:"transformers.LongT5Config.d_ff",description:`<strong>d_ff</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Size of the intermediate feed forward layer in each <code>LongT5Block</code>.`,name:"d_ff"},{anchor:"transformers.LongT5Config.num_layers",description:`<strong>num_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 6) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_layers"},{anchor:"transformers.LongT5Config.num_decoder_layers",description:`<strong>num_decoder_layers</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of hidden layers in the Transformer decoder. Will use the same value as <code>num_layers</code> if not set.`,name:"num_decoder_layers"},{anchor:"transformers.LongT5Config.num_heads",description:`<strong>num_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_heads"},{anchor:"transformers.LongT5Config.local_radius",description:`<strong>local_radius</strong> (<code>int</code>, <em>optional</em>, defaults to 127) &#x2014; Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.`,name:"local_radius"},{anchor:"transformers.LongT5Config.global_block_size",description:`<strong>global_block_size</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Lenght of blocks an input sequence is divided into for a global token representation. 
Used only for <code>encoder_attention_type = &quot;transient-global&quot;</code>.`,name:"global_block_size"},{anchor:"transformers.LongT5Config.relative_attention_num_buckets",description:`<strong>relative_attention_num_buckets</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The number of buckets to use for each attention layer.`,name:"relative_attention_num_buckets"},{anchor:"transformers.LongT5Config.relative_attention_max_distance",description:`<strong>relative_attention_max_distance</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; The maximum distance of the longer sequences for the bucket separation.`,name:"relative_attention_max_distance"},{anchor:"transformers.LongT5Config.dropout_rate",description:`<strong>dropout_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The ratio for all dropout layers.`,name:"dropout_rate"},{anchor:"transformers.LongT5Config.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.LongT5Config.initializer_factor",description:`<strong>initializer_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing).`,name:"initializer_factor"},{anchor:"transformers.LongT5Config.feed_forward_proj",description:`<strong>feed_forward_proj</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; Type of feed forward layer to be used. Should be one of <code>&quot;relu&quot;</code> or <code>&quot;gated-gelu&quot;</code>. LongT5v1.1 uses the <code>&quot;gated-gelu&quot;</code> feed forward projection. Original LongT5 implementation uses <code>&quot;gated-gelu&quot;</code>.`,name:"feed_forward_proj"},{anchor:"transformers.LongT5Config.encoder_attention_type",description:`<strong>encoder_attention_type</strong> (<code>string</code>, <em>optional</em>, defaults to <code>&quot;local&quot;</code>) &#x2014; Type of encoder attention to be used. Should be one of <code>&quot;local&quot;</code> or <code>&quot;transient-global&quot;</code>, which are supported by LongT5 implementation.`,name:"encoder_attention_type"},{anchor:"transformers.LongT5Config.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/configuration_longt5.py#L33"}}),it=new Ve({}),lt=new G({props:{name:"class transformers.LongT5Model",anchor:"transformers.LongT5Model",parameters:[{name:"config",val:": LongT5Config"}],parametersDescription:[{anchor:"transformers.LongT5Model.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config">LongT5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L1745"}}),gt=new G({props:{name:"forward",anchor:"transformers.LongT5Model.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"decoder_attention_mask",val:": typing.Optional[torch.BoolTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"cross_attn_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_outputs",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"decoder_inputs_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.LongT5Model.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.LongT5Model.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongT5Model.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>LONGT5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./longt5#training">LONGT5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.LongT5Model.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.LongT5Model.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongT5Model.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongT5Model.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LongT5Model.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LongT5Model.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.LongT5Model.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongT5Model.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LongT5Model.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LongT5Model.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongT5Model.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongT5Model.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L1795",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config" >LongT5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqModelOutput" >transformers.modeling_outputs.Seq2SeqModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),be=new Tn({props:{$$slots:{default:[nl]},$$scope:{ctx:z}}}),ke=new he({props:{anchor:"transformers.LongT5Model.forward.example",$$slots:{default:[al]},$$scope:{ctx:z}}}),ft=new Ve({}),_t=new G({props:{name:"class transformers.LongT5ForConditionalGeneration",anchor:"transformers.LongT5ForConditionalGeneration",parameters:[{name:"config",val:": 
LongT5Config"}],parametersDescription:[{anchor:"transformers.LongT5ForConditionalGeneration.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config">LongT5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L1898"}}),$t=new G({props:{name:"forward",anchor:"transformers.LongT5ForConditionalGeneration.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"decoder_attention_mask",val:": typing.Optional[torch.BoolTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"cross_attn_head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"encoder_outputs",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"decoder_inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.LongT5ForConditionalGeneration.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>LONGT5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./longt5#training">LONGT5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>torch.BoolTensor</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.decoder_head_mask",description:`<strong>decoder_head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"decoder_head_mask"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.cross_attn_head_mask",description:`<strong>cross_attn_head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the cross-attention modules in the decoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"cross_attn_head_mask"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(torch.FloatTensor)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.decoder_inputs_embeds",description:`<strong>decoder_inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, target_sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>decoder_input_ids</code> you can choose to directly pass an embedded representation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_inputs_embeds</code> have to be input (see <code>past_key_values</code>). 
This is useful if you want more control over how to convert <code>decoder_input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.</p> <p>If <code>decoder_input_ids</code> and <code>decoder_inputs_embeds</code> are both unset, <code>decoder_inputs_embeds</code> takes the value of <code>inputs_embeds</code>.`,name:"decoder_inputs_embeds"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.LongT5ForConditionalGeneration.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[-100, 0, ..., config.vocab_size - 1]</code>. 
All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L1951",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config" >LongT5Config</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted 
average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.Seq2SeqLMOutput" >transformers.modeling_outputs.Seq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ve=new Tn({props:{$$slots:{default:[sl]},$$scope:{ctx:z}}}),we=new he({props:{anchor:"transformers.LongT5ForConditionalGeneration.forward.example",$$slots:{default:[rl]},$$scope:{ctx:z}}}),xt=new Ve({}),zt=new G({props:{name:"class transformers.LongT5EncoderModel",anchor:"transformers.LongT5EncoderModel",parameters:[{name:"config",val:": LongT5Config"}],parametersDescription:[{anchor:"transformers.LongT5EncoderModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config">LongT5Config</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L2139"}}),Et=new G({props:{name:"forward",anchor:"transformers.LongT5EncoderModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.LongT5EncoderModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. 
LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.LongT5EncoderModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.LongT5EncoderModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.LongT5EncoderModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.LongT5EncoderModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.LongT5EncoderModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.LongT5EncoderModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_longt5.py#L2174",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config" >LongT5Config</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xe=new Tn({props:{$$slots:{default:[dl]},$$scope:{ctx:z}}}),ze=new he({props:{anchor:"transformers.LongT5EncoderModel.forward.example",$$slots:{default:[il]},$$scope:{ctx:z}}}),Ct=new Ve({}),Ot=new G({props:{name:"class transformers.FlaxLongT5Model",anchor:"transformers.FlaxLongT5Model",parameters:[{name:"config",val:": LongT5Config"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L2108"}}),Pt=new G({props:{name:"__call__",anchor:"transformers.FlaxLongT5Model.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_input_ids",val:": ndarray = None"},{name:"decoder_attention_mask",val:": 
typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5Model.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxLongT5Model.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxLongT5Model.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>LONGT5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. 
If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./longt5#training">LONGT5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxLongT5Model.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxLongT5Model.__call__.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxLongT5Model.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L1725",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config" >LongT5Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states 
(key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),qe=new Tn({props:{$$slots:{default:[ll]},$$scope:{ctx:z}}}),je=new he({props:{anchor:"transformers.FlaxLongT5Model.__call__.example",$$slots:{default:[cl]},$$scope:{ctx:z}}}),Gt=new G({props:{name:"encode",anchor:"transformers.FlaxLongT5Model.encode",parameters:[{name:"input_ids",val:": 
ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5Model.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxLongT5Model.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxLongT5Model.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxLongT5Model.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxLongT5Model.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L1811",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.longt5.configuration_longt5.LongT5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Me=new he({props:{anchor:"transformers.FlaxLongT5Model.encode.example",$$slots:{default:[pl]},$$scope:{ctx:z}}}),St=new G({props:{name:"decode",anchor:"transformers.FlaxLongT5Model.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5Model.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) 
&#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For training, <code>decoder_input_ids</code> should be provided.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxLongT5Model.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxLongT5Model.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxLongT5Model.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxLongT5Model.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxLongT5Model.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxLongT5Model.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxLongT5Model.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L1869",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.longt5.configuration_longt5.LongT5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when 
<code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" >transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ce=new he({props:{anchor:"transformers.FlaxLongT5Model.decode.example",$$slots:{default:[ul]},$$scope:{ctx:z}}}),At=new Ve({}),Nt=new G({props:{name:"class transformers.FlaxLongT5ForConditionalGeneration",anchor:"transformers.FlaxLongT5ForConditionalGeneration",parameters:[{name:"config",val:": LongT5Config"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L2254"}}),It=new G({props:{name:"__call__",anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_input_ids",val:": ndarray = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>LONGT5 uses the <code>pad_token_id</code> as the starting token for <code>decoder_input_ids</code> generation. If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>To know more on how to prepare <code>decoder_input_ids</code> for pretraining take a look at <a href="./longt5#training">LONGT5 Training</a>.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>, <em>optional</em>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <code>optional</code>: <em>hidden_states</em>, <code>optional</code>: <em>attentions</em>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code> is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code> of length <code>config.n_layers</code> with each tuple having 4 tensors of shape <code>(batch_size, num_heads, sequence_length - 1, embed_size_per_head)</code>) &#x2014; Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L1725",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Config" >LongT5Config</a>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the 
cross-attention heads.</p> </li> <li> <p><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) \u2014 Sequence of hidden-states at the output of the last layer of the encoder of the model.</p> </li> <li> <p><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" >transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Pe=new Tn({props:{$$slots:{default:[hl]},$$scope:{ctx:z}}}),Ge=new he({props:{anchor:"transformers.FlaxLongT5ForConditionalGeneration.__call__.example",$$slots:{default:[ml]},$$scope:{ctx:z}}}),Dt=new G({props:{name:"encode",anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for detail.</p> <p>To know more on how to prepare <code>input_ids</code> for pretraining take a look a <a href="./longt5#training">LONGT5 Training</a>.`,name:"input_ids"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.attention_mask",description:`<strong>attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L1811",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.longt5.configuration_longt5.LongT5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned 
when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ae=new he({props:{anchor:"transformers.FlaxLongT5ForConditionalGeneration.encode.example",$$slots:{default:[gl]},$$scope:{ctx:z}}}),Bt=new G({props:{name:"decode",anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode",parameters:[{name:"decoder_input_ids",val:""},{name:"encoder_outputs",val:""},{name:"encoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"decoder_attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"train",val:": bool = False"},{name:"params",val:": dict = None"},{name:"dropout_rng",val:": PRNGKey = None"}],parametersDescription:[{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.decoder_input_ids",description:`<strong>decoder_input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>) &#x2014; Indices of decoder input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/mt5#transformers.T5Tokenizer">T5Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#decoder-input-ids">What are decoder input IDs?</a></p> <p>For training, <code>decoder_input_ids</code> should be provided.`,name:"decoder_input_ids"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.encoder_outputs",description:`<strong>encoder_outputs</strong> (<code>tuple(tuple(jnp.ndarray)</code>) &#x2014; Tuple consists of (<code>last_hidden_state</code>, <em>optional</em>: <code>hidden_states</code>, <em>optional</em>: <code>attentions</code>) <code>last_hidden_state</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.`,name:"encoder_outputs"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.encoder_attention_mask",description:`<strong>encoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"encoder_attention_mask"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.decoder_attention_mask",description:`<strong>decoder_attention_mask</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, target_sequence_length)</code>, <em>optional</em>) &#x2014; Default behavior: generate a tensor that ignores pad tokens in <code>decoder_input_ids</code>. Causal mask will also be used by default.</p> <p>If you want to change padding behavior, you should modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"decoder_attention_mask"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, np.ndarray]</code>, <em>optional</em>, returned by <code>init_cache</code> or when passing previous <code>past_key_values</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/longt5/modeling_flax_longt5.py#L2257",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.models.longt5.configuration_longt5.LongT5Config'&gt;</code>) and inputs.</p> <ul> <li> <p><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" >transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new he({props:{anchor:"transformers.FlaxLongT5ForConditionalGeneration.decode.example",$$slots:{default:[fl]},$$scope:{ctx:z}}}),{c(){l=r("meta"),b=u(),m=r("h1"),p=r("a"),_=r("span"),k(n.$$.fragment),c=u(),L=r("span"),la=a("LongT5"),bn=u(),ee=r("h2"),ge=r("a"),_o=r("span"),k(Ke.$$.fragment),ca=u(),To=r("span"),pa=a("Overview"),kn=u(),fe=r("p"),ua=a("The LongT5 model was proposed in "),Ze=r("a"),ha=a("LongT5: Efficient Text-To-Text Transformer for Long Sequences"),ma=a(` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),yn=u(),Yt=r("p"),ga=a("The abstract from the paper is the following:"),vn=u(),Rt=r("p"),bo=r("em"),fa=a(`Recent work has shown that either (1) increasing the input length or (2) increasing model size can improve the performance of Transformer-based neural models. In this paper, we present a new model, called LongT5, with which we explore the effects of scaling both the input length and model size at the same time. Specifically, we integrated attention ideas from long-input transformers (ETC), and adopted pre-training strategies from summarization pre-training (PEGASUS) into the scalable T5 architecture. The result is a new attention mechanism we call {\\em Transient Global} (TGlobal), which mimics ETC\u2019s local/global attention mechanism, but without requiring additional side-inputs. We are able to achieve state-of-the-art results on several summarization tasks and outperform the original T5 models on question answering tasks.`),wn=u(),Jt=r("p"),_a=a("Tips:"),$n=u(),C=r("ul"),O=r("li"),Vt=r("a"),Ta=a("LongT5ForConditionalGeneration"),ba=a(" is an extension of "),Kt=r("a"),ka=a("T5ForConditionalGeneration"),ya=a(` exchanging the traditional encoder `),ko=r("em"),va=a("self-attention"),wa=a(" layer with efficient either "),yo=r("em"),$a=a("local"),xa=a(" attention or "),vo=r("em"),za=a("transient-global"),La=a(" ("),wo=r("em"),qa=a("tglobal"),ja=a(") attention."),Fa=u(),Qe=r("li"),Ma=a(`Unlike the T5 model, LongT5 does not use a task prefix. Furthermore, it uses a different pre-training objective inspired by the pre-training of `),Zt=r("a"),Ea=a("PegasusForConditionalGeneration"),Ca=a("."),Oa=u(),Xe=r("li"),Pa=a("LongT5 model is designed to work efficiently and very well on long-range "),$o=r("em"),Ga=a("sequence-to-sequence"),Sa=a(` tasks where the input sequence exceeds commonly used 512 tokens. 
It is capable of handling input sequences of a length up to 16,384 tokens.`),Aa=u(),j=r("li"),Na=a("For "),xo=r("em"),Ia=a("Local Attention"),Da=a(", the sparse sliding-window local attention operation allows a given token to attend only "),zo=r("code"),Ba=a("r"),Wa=a(` tokens to the left and right of it (with `),Lo=r("code"),Ua=a("r=127"),Ha=a(" by default). "),qo=r("em"),Ya=a("Local Attention"),Ra=a(` does not introduce any new parameters to the model. The complexity of the mechanism is linear in input sequence length `),jo=r("code"),Ja=a("l"),Va=a(": "),Fo=r("code"),Ka=a("O(l*r)"),Za=a("."),Qa=u(),q=r("li"),Mo=r("em"),Xa=a("Transient Global Attention"),es=a(" is an extension of the "),Eo=r("em"),ts=a("Local Attention"),os=a(`. It, furthermore, allows each input token to interact with all other tokens in the layer. This is achieved via splitting an input sequence into blocks of a fixed length `),Co=r("code"),ns=a("k"),as=a(" (with a default "),Oo=r("code"),ss=a("k=16"),rs=a(`). Then, a global token for such a block is obtained via summing and normalizing the embeddings of every token in the block. Thanks to this, the attention allows each token to attend to both nearby tokens like in Local attention, and also every global token like in the case of standard global attention (`),Po=r("em"),ds=a("transient"),is=a(` represents the fact the global tokens are constructed dynamically within each attention operation). As a consequence, `),Go=r("em"),ls=a("TGlobal"),cs=a(` attention introduces a few new parameters \u2014 global relative position biases and a layer normalization for global token\u2019s embedding. The complexity of this mechanism is `),So=r("code"),ps=a("O(l(r + l/k))"),us=a("."),hs=u(),et=r("li"),ms=a("An example showing how to evaluate a fine-tuned LongT5 model on the "),tt=r("a"),gs=a("pubmed dataset"),fs=a(" is below."),xn=u(),k(ot.$$.fragment),zn=u(),Q=r("p"),_s=a("This model was contributed by "),nt=r("a"),Ts=a("stancld"),bs=a(`. The original code can be found `),at=r("a"),ks=a("here"),ys=a("."),Ln=u(),te=r("h2"),_e=r("a"),Ao=r("span"),k(st.$$.fragment),vs=u(),No=r("span"),ws=a("LongT5Config"),qn=u(),K=r("div"),k(rt.$$.fragment),$s=u(),Z=r("p"),xs=a("This is the configuration class to store the configuration of a "),Qt=r("a"),zs=a("LongT5Model"),Ls=a(" or a "),Xt=r("a"),qs=a("FlaxLongT5Model"),js=a(`. It is used to instantiate a LongT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5 `),dt=r("a"),Fs=a("google/long-t5-local-base"),Ms=a(" architecture."),Es=u(),oe=r("p"),Cs=a("Configuration objects inherit from "),eo=r("a"),Os=a("PretrainedConfig"),Ps=a(` and can be used to control the model outputs. Read the documentation from `),to=r("a"),Gs=a("PretrainedConfig"),Ss=a(" for more information."),jn=u(),ne=r("h2"),Te=r("a"),Io=r("span"),k(it.$$.fragment),As=u(),Do=r("span"),Ns=a("LongT5Model"),Fn=u(),F=r("div"),k(lt.$$.fragment),Is=u(),Bo=r("p"),Ds=a("The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top."),Bs=u(),ct=r("p"),Ws=a("The LongT5 model was proposed in "),pt=r("a"),Us=a(`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),Hs=a(` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. 
LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),Ys=u(),ut=r("p"),Rs=a("This model inherits from "),oo=r("a"),Js=a("PreTrainedModel"),Vs=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ks=u(),ht=r("p"),Zs=a("This model is also a PyTorch "),mt=r("a"),Qs=a("torch.nn.Module"),Xs=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),er=u(),D=r("div"),k(gt.$$.fragment),tr=u(),ae=r("p"),or=a("The "),no=r("a"),nr=a("LongT5Model"),ar=a(" forward method, overrides the "),Wo=r("code"),sr=a("__call__"),rr=a(" special method."),dr=u(),k(be.$$.fragment),ir=u(),k(ke.$$.fragment),Mn=u(),se=r("h2"),ye=r("a"),Uo=r("span"),k(ft.$$.fragment),lr=u(),Ho=r("span"),cr=a("LongT5ForConditionalGeneration"),En=u(),M=r("div"),k(_t.$$.fragment),pr=u(),Tt=r("p"),ur=a("LONGT5 Model with a "),Yo=r("code"),hr=a("language modeling"),mr=a(" head on top."),gr=u(),bt=r("p"),fr=a("The LongT5 model was proposed in "),kt=r("a"),_r=a(`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),Tr=a(` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),br=u(),yt=r("p"),kr=a("This model inherits from "),ao=r("a"),yr=a("PreTrainedModel"),vr=a(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),wr=u(),vt=r("p"),$r=a("This model is also a PyTorch "),wt=r("a"),xr=a("torch.nn.Module"),zr=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Lr=u(),B=r("div"),k($t.$$.fragment),qr=u(),re=r("p"),jr=a("The "),so=r("a"),Fr=a("LongT5ForConditionalGeneration"),Mr=a(" forward method, overrides the "),Ro=r("code"),Er=a("__call__"),Cr=a(" special method."),Or=u(),k(ve.$$.fragment),Pr=u(),k(we.$$.fragment),Cn=u(),de=r("h2"),$e=r("a"),Jo=r("span"),k(xt.$$.fragment),Gr=u(),Vo=r("span"),Sr=a("LongT5EncoderModel"),On=u(),E=r("div"),k(zt.$$.fragment),Ar=u(),Ko=r("p"),Nr=a("The bare LONGT5 Model transformer outputting encoder\u2019s raw hidden-states without any specific head on top."),Ir=u(),Lt=r("p"),Dr=a("The LongT5 model was proposed in "),qt=r("a"),Br=a(`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),Wr=a(` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),Ur=u(),jt=r("p"),Hr=a("This model inherits from "),ro=r("a"),Yr=a("PreTrainedModel"),Rr=a(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jr=u(),Ft=r("p"),Vr=a("This model is also a PyTorch "),Mt=r("a"),Kr=a("torch.nn.Module"),Zr=a(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qr=u(),W=r("div"),k(Et.$$.fragment),Xr=u(),ie=r("p"),ed=a("The "),io=r("a"),td=a("LongT5EncoderModel"),od=a(" forward method, overrides the "),Zo=r("code"),nd=a("__call__"),ad=a(" special method."),sd=u(),k(xe.$$.fragment),rd=u(),k(ze.$$.fragment),Pn=u(),le=r("h2"),Le=r("a"),Qo=r("span"),k(Ct.$$.fragment),dd=u(),Xo=r("span"),id=a("FlaxLongT5Model"),Gn=u(),A=r("div"),k(Ot.$$.fragment),ld=u(),U=r("div"),k(Pt.$$.fragment),cd=u(),ce=r("p"),pd=a("The "),en=r("code"),ud=a("FlaxLongT5PreTrainedModel"),hd=a(" forward method, overrides the "),tn=r("code"),md=a("__call__"),gd=a(" special method."),fd=u(),k(qe.$$.fragment),_d=u(),k(je.$$.fragment),Td=u(),Fe=r("div"),k(Gt.$$.fragment),bd=u(),k(Me.$$.fragment),kd=u(),Ee=r("div"),k(St.$$.fragment),yd=u(),k(Ce.$$.fragment),Sn=u(),pe=r("h2"),Oe=r("a"),on=r("span"),k(At.$$.fragment),vd=u(),nn=r("span"),wd=a("FlaxLongT5ForConditionalGeneration"),An=u(),N=r("div"),k(Nt.$$.fragment),$d=u(),H=r("div"),k(It.$$.fragment),xd=u(),ue=r("p"),zd=a("The "),an=r("code"),Ld=a("FlaxLongT5PreTrainedModel"),qd=a(" forward method, overrides the "),sn=r("code"),jd=a("__call__"),Fd=a(" special method."),Md=u(),k(Pe.$$.fragment),Ed=u(),k(Ge.$$.fragment),Cd=u(),Se=r("div"),k(Dt.$$.fragment),Od=u(),k(Ae.$$.fragment),Pd=u(),Ne=r("div"),k(Bt.$$.fragment),Gd=u(),k(Ie.$$.fragment),this.h()},l(t){const f=tl('[data-svelte="svelte-1phssyn"]',document.head);l=d(f,"META",{name:!0,content:!0}),f.forEach(o),b=h(t),m=d(t,"H1",{class:!0});var Wt=i(m);p=d(Wt,"A",{id:!0,class:!0,href:!0});var rn=i(p);_=d(rn,"SPAN",{});var dn=i(_);y(n.$$.fragment,dn),dn.forEach(o),rn.forEach(o),c=h(Wt),L=d(Wt,"SPAN",{});var ln=i(L);la=s(ln,"LongT5"),ln.forEach(o),Wt.forEach(o),bn=h(t),ee=d(t,"H2",{class:!0});var Ut=i(ee);ge=d(Ut,"A",{id:!0,class:!0,href:!0});var cn=i(ge);_o=d(cn,"SPAN",{});var pn=i(_o);y(Ke.$$.fragment,pn),pn.forEach(o),cn.forEach(o),ca=h(Ut),To=d(Ut,"SPAN",{});var un=i(To);pa=s(un,"Overview"),un.forEach(o),Ut.forEach(o),kn=h(t),fe=d(t,"P",{});var Ht=i(fe);ua=s(Ht,"The LongT5 model was proposed in "),Ze=d(Ht,"A",{href:!0,rel:!0});var hn=i(Ze);ha=s(hn,"LongT5: Efficient Text-To-Text Transformer for Long Sequences"),hn.forEach(o),ma=s(Ht,` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),Ht.forEach(o),yn=h(t),Yt=d(t,"P",{});var mn=i(Yt);ga=s(mn,"The abstract from the paper is the following:"),mn.forEach(o),vn=h(t),Rt=d(t,"P",{});var gn=i(Rt);bo=d(gn,"EM",{});var fn=i(bo);fa=s(fn,`Recent work has shown that either (1) increasing the input length or (2) increasing model size can improve the performance of Transformer-based neural models. In this paper, we present a new model, called LongT5, with which we explore the effects of scaling both the input length and model size at the same time. 
Specifically, we integrated attention ideas from long-input transformers (ETC), and adopted pre-training strategies from summarization pre-training (PEGASUS) into the scalable T5 architecture. The result is a new attention mechanism we call {\\em Transient Global} (TGlobal), which mimics ETC\u2019s local/global attention mechanism, but without requiring additional side-inputs. We are able to achieve state-of-the-art results on several summarization tasks and outperform the original T5 models on question answering tasks.`),fn.forEach(o),gn.forEach(o),wn=h(t),Jt=d(t,"P",{});var _n=i(Jt);_a=s(_n,"Tips:"),_n.forEach(o),$n=h(t),C=d(t,"UL",{});var Y=i(C);O=d(Y,"LI",{});var I=i(O);Vt=d(I,"A",{href:!0});var Sd=i(Vt);Ta=s(Sd,"LongT5ForConditionalGeneration"),Sd.forEach(o),ba=s(I," is an extension of "),Kt=d(I,"A",{href:!0});var Ad=i(Kt);ka=s(Ad,"T5ForConditionalGeneration"),Ad.forEach(o),ya=s(I,` exchanging the traditional encoder `),ko=d(I,"EM",{});var Nd=i(ko);va=s(Nd,"self-attention"),Nd.forEach(o),wa=s(I," layer with efficient either "),yo=d(I,"EM",{});var Id=i(yo);$a=s(Id,"local"),Id.forEach(o),xa=s(I," attention or "),vo=d(I,"EM",{});var Dd=i(vo);za=s(Dd,"transient-global"),Dd.forEach(o),La=s(I," ("),wo=d(I,"EM",{});var Bd=i(wo);qa=s(Bd,"tglobal"),Bd.forEach(o),ja=s(I,") attention."),I.forEach(o),Fa=h(Y),Qe=d(Y,"LI",{});var In=i(Qe);Ma=s(In,`Unlike the T5 model, LongT5 does not use a task prefix. Furthermore, it uses a different pre-training objective inspired by the pre-training of `),Zt=d(In,"A",{href:!0});var Wd=i(Zt);Ea=s(Wd,"PegasusForConditionalGeneration"),Wd.forEach(o),Ca=s(In,"."),In.forEach(o),Oa=h(Y),Xe=d(Y,"LI",{});var Dn=i(Xe);Pa=s(Dn,"LongT5 model is designed to work efficiently and very well on long-range "),$o=d(Dn,"EM",{});var Ud=i($o);Ga=s(Ud,"sequence-to-sequence"),Ud.forEach(o),Sa=s(Dn,` tasks where the input sequence exceeds commonly used 512 tokens. It is capable of handling input sequences of a length up to 16,384 tokens.`),Dn.forEach(o),Aa=h(Y),j=d(Y,"LI",{});var S=i(j);Na=s(S,"For "),xo=d(S,"EM",{});var Hd=i(xo);Ia=s(Hd,"Local Attention"),Hd.forEach(o),Da=s(S,", the sparse sliding-window local attention operation allows a given token to attend only "),zo=d(S,"CODE",{});var Yd=i(zo);Ba=s(Yd,"r"),Yd.forEach(o),Wa=s(S,` tokens to the left and right of it (with `),Lo=d(S,"CODE",{});var Rd=i(Lo);Ua=s(Rd,"r=127"),Rd.forEach(o),Ha=s(S," by default). "),qo=d(S,"EM",{});var Jd=i(qo);Ya=s(Jd,"Local Attention"),Jd.forEach(o),Ra=s(S,` does not introduce any new parameters to the model. The complexity of the mechanism is linear in input sequence length `),jo=d(S,"CODE",{});var Vd=i(jo);Ja=s(Vd,"l"),Vd.forEach(o),Va=s(S,": "),Fo=d(S,"CODE",{});var Kd=i(Fo);Ka=s(Kd,"O(l*r)"),Kd.forEach(o),Za=s(S,"."),S.forEach(o),Qa=h(Y),q=d(Y,"LI",{});var P=i(q);Mo=d(P,"EM",{});var Zd=i(Mo);Xa=s(Zd,"Transient Global Attention"),Zd.forEach(o),es=s(P," is an extension of the "),Eo=d(P,"EM",{});var Qd=i(Eo);ts=s(Qd,"Local Attention"),Qd.forEach(o),os=s(P,`. It, furthermore, allows each input token to interact with all other tokens in the layer. This is achieved via splitting an input sequence into blocks of a fixed length `),Co=d(P,"CODE",{});var Xd=i(Co);ns=s(Xd,"k"),Xd.forEach(o),as=s(P," (with a default "),Oo=d(P,"CODE",{});var ei=i(Oo);ss=s(ei,"k=16"),ei.forEach(o),rs=s(P,`). Then, a global token for such a block is obtained via summing and normalizing the embeddings of every token in the block. 
Thanks to this, the attention allows each token to attend to both nearby tokens like in Local attention, and also every global token like in the case of standard global attention (`),Po=d(P,"EM",{});var ti=i(Po);ds=s(ti,"transient"),ti.forEach(o),is=s(P,` represents the fact the global tokens are constructed dynamically within each attention operation). As a consequence, `),Go=d(P,"EM",{});var oi=i(Go);ls=s(oi,"TGlobal"),oi.forEach(o),cs=s(P,` attention introduces a few new parameters \u2014 global relative position biases and a layer normalization for global token\u2019s embedding. The complexity of this mechanism is `),So=d(P,"CODE",{});var ni=i(So);ps=s(ni,"O(l(r + l/k))"),ni.forEach(o),us=s(P,"."),P.forEach(o),hs=h(Y),et=d(Y,"LI",{});var Bn=i(et);ms=s(Bn,"An example showing how to evaluate a fine-tuned LongT5 model on the "),tt=d(Bn,"A",{href:!0,rel:!0});var ai=i(tt);gs=s(ai,"pubmed dataset"),ai.forEach(o),fs=s(Bn," is below."),Bn.forEach(o),Y.forEach(o),xn=h(t),y(ot.$$.fragment,t),zn=h(t),Q=d(t,"P",{});var lo=i(Q);_s=s(lo,"This model was contributed by "),nt=d(lo,"A",{href:!0,rel:!0});var si=i(nt);Ts=s(si,"stancld"),si.forEach(o),bs=s(lo,`. The original code can be found `),at=d(lo,"A",{href:!0,rel:!0});var ri=i(at);ks=s(ri,"here"),ri.forEach(o),ys=s(lo,"."),lo.forEach(o),Ln=h(t),te=d(t,"H2",{class:!0});var Wn=i(te);_e=d(Wn,"A",{id:!0,class:!0,href:!0});var di=i(_e);Ao=d(di,"SPAN",{});var ii=i(Ao);y(st.$$.fragment,ii),ii.forEach(o),di.forEach(o),vs=h(Wn),No=d(Wn,"SPAN",{});var li=i(No);ws=s(li,"LongT5Config"),li.forEach(o),Wn.forEach(o),qn=h(t),K=d(t,"DIV",{class:!0});var co=i(K);y(rt.$$.fragment,co),$s=h(co),Z=d(co,"P",{});var De=i(Z);xs=s(De,"This is the configuration class to store the configuration of a "),Qt=d(De,"A",{href:!0});var ci=i(Qt);zs=s(ci,"LongT5Model"),ci.forEach(o),Ls=s(De," or a "),Xt=d(De,"A",{href:!0});var pi=i(Xt);qs=s(pi,"FlaxLongT5Model"),pi.forEach(o),js=s(De,`. It is used to instantiate a LongT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5 `),dt=d(De,"A",{href:!0,rel:!0});var ui=i(dt);Fs=s(ui,"google/long-t5-local-base"),ui.forEach(o),Ms=s(De," architecture."),De.forEach(o),Es=h(co),oe=d(co,"P",{});var po=i(oe);Cs=s(po,"Configuration objects inherit from "),eo=d(po,"A",{href:!0});var hi=i(eo);Os=s(hi,"PretrainedConfig"),hi.forEach(o),Ps=s(po,` and can be used to control the model outputs. Read the documentation from `),to=d(po,"A",{href:!0});var mi=i(to);Gs=s(mi,"PretrainedConfig"),mi.forEach(o),Ss=s(po," for more information."),po.forEach(o),co.forEach(o),jn=h(t),ne=d(t,"H2",{class:!0});var Un=i(ne);Te=d(Un,"A",{id:!0,class:!0,href:!0});var gi=i(Te);Io=d(gi,"SPAN",{});var fi=i(Io);y(it.$$.fragment,fi),fi.forEach(o),gi.forEach(o),As=h(Un),Do=d(Un,"SPAN",{});var _i=i(Do);Ns=s(_i,"LongT5Model"),_i.forEach(o),Un.forEach(o),Fn=h(t),F=d(t,"DIV",{class:!0});var R=i(F);y(lt.$$.fragment,R),Is=h(R),Bo=d(R,"P",{});var Ti=i(Bo);Ds=s(Ti,"The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top."),Ti.forEach(o),Bs=h(R),ct=d(R,"P",{});var Hn=i(ct);Ws=s(Hn,"The LongT5 model was proposed in "),pt=d(Hn,"A",{href:!0,rel:!0});var bi=i(pt);Us=s(bi,`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),bi.forEach(o),Hs=s(Hn,` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. 
It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),Hn.forEach(o),Ys=h(R),ut=d(R,"P",{});var Yn=i(ut);Rs=s(Yn,"This model inherits from "),oo=d(Yn,"A",{href:!0});var ki=i(oo);Js=s(ki,"PreTrainedModel"),ki.forEach(o),Vs=s(Yn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yn.forEach(o),Ks=h(R),ht=d(R,"P",{});var Rn=i(ht);Zs=s(Rn,"This model is also a PyTorch "),mt=d(Rn,"A",{href:!0,rel:!0});var yi=i(mt);Qs=s(yi,"torch.nn.Module"),yi.forEach(o),Xs=s(Rn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Rn.forEach(o),er=h(R),D=d(R,"DIV",{class:!0});var Be=i(D);y(gt.$$.fragment,Be),tr=h(Be),ae=d(Be,"P",{});var uo=i(ae);or=s(uo,"The "),no=d(uo,"A",{href:!0});var vi=i(no);nr=s(vi,"LongT5Model"),vi.forEach(o),ar=s(uo," forward method, overrides the "),Wo=d(uo,"CODE",{});var wi=i(Wo);sr=s(wi,"__call__"),wi.forEach(o),rr=s(uo," special method."),uo.forEach(o),dr=h(Be),y(be.$$.fragment,Be),ir=h(Be),y(ke.$$.fragment,Be),Be.forEach(o),R.forEach(o),Mn=h(t),se=d(t,"H2",{class:!0});var Jn=i(se);ye=d(Jn,"A",{id:!0,class:!0,href:!0});var $i=i(ye);Uo=d($i,"SPAN",{});var xi=i(Uo);y(ft.$$.fragment,xi),xi.forEach(o),$i.forEach(o),lr=h(Jn),Ho=d(Jn,"SPAN",{});var zi=i(Ho);cr=s(zi,"LongT5ForConditionalGeneration"),zi.forEach(o),Jn.forEach(o),En=h(t),M=d(t,"DIV",{class:!0});var J=i(M);y(_t.$$.fragment,J),pr=h(J),Tt=d(J,"P",{});var Vn=i(Tt);ur=s(Vn,"LONGT5 Model with a "),Yo=d(Vn,"CODE",{});var Li=i(Yo);hr=s(Li,"language modeling"),Li.forEach(o),mr=s(Vn," head on top."),Vn.forEach(o),gr=h(J),bt=d(J,"P",{});var Kn=i(bt);fr=s(Kn,"The LongT5 model was proposed in "),kt=d(Kn,"A",{href:!0,rel:!0});var qi=i(kt);_r=s(qi,`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),qi.forEach(o),Tr=s(Kn,` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),Kn.forEach(o),br=h(J),yt=d(J,"P",{});var Zn=i(yt);kr=s(Zn,"This model inherits from "),ao=d(Zn,"A",{href:!0});var ji=i(ao);yr=s(ji,"PreTrainedModel"),ji.forEach(o),vr=s(Zn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Zn.forEach(o),wr=h(J),vt=d(J,"P",{});var Qn=i(vt);$r=s(Qn,"This model is also a PyTorch "),wt=d(Qn,"A",{href:!0,rel:!0});var Fi=i(wt);xr=s(Fi,"torch.nn.Module"),Fi.forEach(o),zr=s(Qn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qn.forEach(o),Lr=h(J),B=d(J,"DIV",{class:!0});var We=i(B);y($t.$$.fragment,We),qr=h(We),re=d(We,"P",{});var ho=i(re);jr=s(ho,"The "),so=d(ho,"A",{href:!0});var Mi=i(so);Fr=s(Mi,"LongT5ForConditionalGeneration"),Mi.forEach(o),Mr=s(ho," forward method, overrides the "),Ro=d(ho,"CODE",{});var Ei=i(Ro);Er=s(Ei,"__call__"),Ei.forEach(o),Cr=s(ho," special method."),ho.forEach(o),Or=h(We),y(ve.$$.fragment,We),Pr=h(We),y(we.$$.fragment,We),We.forEach(o),J.forEach(o),Cn=h(t),de=d(t,"H2",{class:!0});var Xn=i(de);$e=d(Xn,"A",{id:!0,class:!0,href:!0});var Ci=i($e);Jo=d(Ci,"SPAN",{});var Oi=i(Jo);y(xt.$$.fragment,Oi),Oi.forEach(o),Ci.forEach(o),Gr=h(Xn),Vo=d(Xn,"SPAN",{});var Pi=i(Vo);Sr=s(Pi,"LongT5EncoderModel"),Pi.forEach(o),Xn.forEach(o),On=h(t),E=d(t,"DIV",{class:!0});var V=i(E);y(zt.$$.fragment,V),Ar=h(V),Ko=d(V,"P",{});var Gi=i(Ko);Nr=s(Gi,"The bare LONGT5 Model transformer outputting encoder\u2019s raw hidden-states without any specific head on top."),Gi.forEach(o),Ir=h(V),Lt=d(V,"P",{});var ea=i(Lt);Dr=s(ea,"The LongT5 model was proposed in "),qt=d(ea,"A",{href:!0,rel:!0});var Si=i(qt);Br=s(Si,`LongT5: Efficient Text-To-Text Transformer for Long Sequences`),Si.forEach(o),Wr=s(ea,` by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It\u2019s an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.`),ea.forEach(o),Ur=h(V),jt=d(V,"P",{});var ta=i(jt);Hr=s(ta,"This model inherits from "),ro=d(ta,"A",{href:!0});var Ai=i(ro);Yr=s(Ai,"PreTrainedModel"),Ai.forEach(o),Rr=s(ta,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ta.forEach(o),Jr=h(V),Ft=d(V,"P",{});var oa=i(Ft);Vr=s(oa,"This model is also a PyTorch "),Mt=d(oa,"A",{href:!0,rel:!0});var Ni=i(Mt);Kr=s(Ni,"torch.nn.Module"),Ni.forEach(o),Zr=s(oa,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),oa.forEach(o),Qr=h(V),W=d(V,"DIV",{class:!0});var Ue=i(W);y(Et.$$.fragment,Ue),Xr=h(Ue),ie=d(Ue,"P",{});var mo=i(ie);ed=s(mo,"The "),io=d(mo,"A",{href:!0});var Ii=i(io);td=s(Ii,"LongT5EncoderModel"),Ii.forEach(o),od=s(mo," forward method, overrides the "),Zo=d(mo,"CODE",{});var Di=i(Zo);nd=s(Di,"__call__"),Di.forEach(o),ad=s(mo," special method."),mo.forEach(o),sd=h(Ue),y(xe.$$.fragment,Ue),rd=h(Ue),y(ze.$$.fragment,Ue),Ue.forEach(o),V.forEach(o),Pn=h(t),le=d(t,"H2",{class:!0});var na=i(le);Le=d(na,"A",{id:!0,class:!0,href:!0});var Bi=i(Le);Qo=d(Bi,"SPAN",{});var Wi=i(Qo);y(Ct.$$.fragment,Wi),Wi.forEach(o),Bi.forEach(o),dd=h(na),Xo=d(na,"SPAN",{});var Ui=i(Xo);id=s(Ui,"FlaxLongT5Model"),Ui.forEach(o),na.forEach(o),Gn=h(t),A=d(t,"DIV",{class:!0});var He=i(A);y(Ot.$$.fragment,He),ld=h(He),U=d(He,"DIV",{class:!0});var Ye=i(U);y(Pt.$$.fragment,Ye),cd=h(Ye),ce=d(Ye,"P",{});var go=i(ce);pd=s(go,"The "),en=d(go,"CODE",{});var Hi=i(en);ud=s(Hi,"FlaxLongT5PreTrainedModel"),Hi.forEach(o),hd=s(go," forward method, overrides the "),tn=d(go,"CODE",{});var Yi=i(tn);md=s(Yi,"__call__"),Yi.forEach(o),gd=s(go," special method."),go.forEach(o),fd=h(Ye),y(qe.$$.fragment,Ye),_d=h(Ye),y(je.$$.fragment,Ye),Ye.forEach(o),Td=h(He),Fe=d(He,"DIV",{class:!0});var aa=i(Fe);y(Gt.$$.fragment,aa),bd=h(aa),y(Me.$$.fragment,aa),aa.forEach(o),kd=h(He),Ee=d(He,"DIV",{class:!0});var sa=i(Ee);y(St.$$.fragment,sa),yd=h(sa),y(Ce.$$.fragment,sa),sa.forEach(o),He.forEach(o),Sn=h(t),pe=d(t,"H2",{class:!0});var ra=i(pe);Oe=d(ra,"A",{id:!0,class:!0,href:!0});var Ri=i(Oe);on=d(Ri,"SPAN",{});var Ji=i(on);y(At.$$.fragment,Ji),Ji.forEach(o),Ri.forEach(o),vd=h(ra),nn=d(ra,"SPAN",{});var Vi=i(nn);wd=s(Vi,"FlaxLongT5ForConditionalGeneration"),Vi.forEach(o),ra.forEach(o),An=h(t),N=d(t,"DIV",{class:!0});var Re=i(N);y(Nt.$$.fragment,Re),$d=h(Re),H=d(Re,"DIV",{class:!0});var Je=i(H);y(It.$$.fragment,Je),xd=h(Je),ue=d(Je,"P",{});var fo=i(ue);zd=s(fo,"The "),an=d(fo,"CODE",{});var Ki=i(an);Ld=s(Ki,"FlaxLongT5PreTrainedModel"),Ki.forEach(o),qd=s(fo," forward method, overrides the "),sn=d(fo,"CODE",{});var Zi=i(sn);jd=s(Zi,"__call__"),Zi.forEach(o),Fd=s(fo," special method."),fo.forEach(o),Md=h(Je),y(Pe.$$.fragment,Je),Ed=h(Je),y(Ge.$$.fragment,Je),Je.forEach(o),Cd=h(Re),Se=d(Re,"DIV",{class:!0});var da=i(Se);y(Dt.$$.fragment,da),Od=h(da),y(Ae.$$.fragment,da),da.forEach(o),Pd=h(Re),Ne=d(Re,"DIV",{class:!0});var ia=i(Ne);y(Bt.$$.fragment,ia),Gd=h(ia),y(Ie.$$.fragment,ia),ia.forEach(o),Re.forEach(o),this.h()},h(){g(l,"name","hf:doc:metadata"),g(l,"content",JSON.stringify(Tl)),g(p,"id","longt5"),g(p,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(p,"href","#longt5"),g(m,"class","relative group"),g(ge,"id","overview"),g(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(ge,"href","#overview"),g(ee,"class","relative 
group"),g(Ze,"href","https://arxiv.org/abs/2112.07916"),g(Ze,"rel","nofollow"),g(Vt,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5ForConditionalGeneration"),g(Kt,"href","/docs/transformers/pr_19429/en/model_doc/t5#transformers.T5ForConditionalGeneration"),g(Zt,"href","/docs/transformers/pr_19429/en/model_doc/pegasus#transformers.PegasusForConditionalGeneration"),g(tt,"href","https://huggingface.co/datasets/scientific_papers"),g(tt,"rel","nofollow"),g(nt,"href","https://huggingface.co/stancld"),g(nt,"rel","nofollow"),g(at,"href","https://github.com/google-research/longt5"),g(at,"rel","nofollow"),g(_e,"id","transformers.LongT5Config"),g(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(_e,"href","#transformers.LongT5Config"),g(te,"class","relative group"),g(Qt,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Model"),g(Xt,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.FlaxLongT5Model"),g(dt,"href","https://huggingface.co/google/long-t5-local-base"),g(dt,"rel","nofollow"),g(eo,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),g(to,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),g(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Te,"id","transformers.LongT5Model"),g(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Te,"href","#transformers.LongT5Model"),g(ne,"class","relative group"),g(pt,"href","https://arxiv.org/abs/2112.07916"),g(pt,"rel","nofollow"),g(oo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),g(mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),g(mt,"rel","nofollow"),g(no,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5Model"),g(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(ye,"id","transformers.LongT5ForConditionalGeneration"),g(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(ye,"href","#transformers.LongT5ForConditionalGeneration"),g(se,"class","relative group"),g(kt,"href","https://arxiv.org/abs/2112.07916"),g(kt,"rel","nofollow"),g(ao,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),g(wt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),g(wt,"rel","nofollow"),g(so,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5ForConditionalGeneration"),g(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g($e,"id","transformers.LongT5EncoderModel"),g($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g($e,"href","#transformers.LongT5EncoderModel"),g(de,"class","relative 
group"),g(qt,"href","https://arxiv.org/abs/2112.07916"),g(qt,"rel","nofollow"),g(ro,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),g(Mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),g(Mt,"rel","nofollow"),g(io,"href","/docs/transformers/pr_19429/en/model_doc/longt5#transformers.LongT5EncoderModel"),g(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Le,"id","transformers.FlaxLongT5Model"),g(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Le,"href","#transformers.FlaxLongT5Model"),g(le,"class","relative group"),g(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Oe,"id","transformers.FlaxLongT5ForConditionalGeneration"),g(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Oe,"href","#transformers.FlaxLongT5ForConditionalGeneration"),g(pe,"class","relative group"),g(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(Ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),g(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(t,f){e(document.head,l),T(t,b,f),T(t,m,f),e(m,p),e(p,_),v(n,_,null),e(m,c),e(m,L),e(L,la),T(t,bn,f),T(t,ee,f),e(ee,ge),e(ge,_o),v(Ke,_o,null),e(ee,ca),e(ee,To),e(To,pa),T(t,kn,f),T(t,fe,f),e(fe,ua),e(fe,Ze),e(Ze,ha),e(fe,ma),T(t,yn,f),T(t,Yt,f),e(Yt,ga),T(t,vn,f),T(t,Rt,f),e(Rt,bo),e(bo,fa),T(t,wn,f),T(t,Jt,f),e(Jt,_a),T(t,$n,f),T(t,C,f),e(C,O),e(O,Vt),e(Vt,Ta),e(O,ba),e(O,Kt),e(Kt,ka),e(O,ya),e(O,ko),e(ko,va),e(O,wa),e(O,yo),e(yo,$a),e(O,xa),e(O,vo),e(vo,za),e(O,La),e(O,wo),e(wo,qa),e(O,ja),e(C,Fa),e(C,Qe),e(Qe,Ma),e(Qe,Zt),e(Zt,Ea),e(Qe,Ca),e(C,Oa),e(C,Xe),e(Xe,Pa),e(Xe,$o),e($o,Ga),e(Xe,Sa),e(C,Aa),e(C,j),e(j,Na),e(j,xo),e(xo,Ia),e(j,Da),e(j,zo),e(zo,Ba),e(j,Wa),e(j,Lo),e(Lo,Ua),e(j,Ha),e(j,qo),e(qo,Ya),e(j,Ra),e(j,jo),e(jo,Ja),e(j,Va),e(j,Fo),e(Fo,Ka),e(j,Za),e(C,Qa),e(C,q),e(q,Mo),e(Mo,Xa),e(q,es),e(q,Eo),e(Eo,ts),e(q,os),e(q,Co),e(Co,ns),e(q,as),e(q,Oo),e(Oo,ss),e(q,rs),e(q,Po),e(Po,ds),e(q,is),e(q,Go),e(Go,ls),e(q,cs),e(q,So),e(So,ps),e(q,us),e(C,hs),e(C,et),e(et,ms),e(et,tt),e(tt,gs),e(et,fs),T(t,xn,f),v(ot,t,f),T(t,zn,f),T(t,Q,f),e(Q,_s),e(Q,nt),e(nt,Ts),e(Q,bs),e(Q,at),e(at,ks),e(Q,ys),T(t,Ln,f),T(t,te,f),e(te,_e),e(_e,Ao),v(st,Ao,null),e(te,vs),e(te,No),e(No,ws),T(t,qn,f),T(t,K,f),v(rt,K,null),e(K,$s),e(K,Z),e(Z,xs),e(Z,Qt),e(Qt,zs),e(Z,Ls),e(Z,Xt),e(Xt,qs),e(Z,js),e(Z,dt),e(dt,Fs),e(Z,Ms),e(K,Es),e(K,oe),e(oe,Cs),e(oe,eo),e(eo,Os),e(oe,Ps),e(oe,to),e(to,Gs),e(oe,Ss),T(t,jn,f),T(t,ne,f),e(ne,Te),e(Te,Io),v(it,Io,null),e(ne,As),e(ne,Do),e(Do,Ns),T(t,Fn,f),T(t,F,f),v(lt,F,null),e(F,Is),e(F,Bo),e(Bo,Ds),e(F,Bs),e(F,ct),e(ct,Ws),e(ct,pt),e(pt,Us),e(ct,Hs),e(F,Ys),e(F,ut),e(ut,Rs),e(ut,oo),e(oo,Js),e(ut,Vs),e(F,Ks),e(F,ht),e(ht,Zs),e(ht,mt),e(mt,Qs),e(ht,Xs),e(F,er),e(F,D),v(gt,D,null),e(D,tr),e(D,ae),e(ae,or),e(ae,no),e(no,nr),e(ae,ar),e(ae,Wo),e(Wo,sr),e(ae,rr),e(D,dr),v(be,D,null),e(D,ir),v(ke,D,null),T(t,Mn,f),T(t,se,f),e(se,ye),e(ye,Uo),v(ft,Uo,null),e(se,lr),e(se,Ho),e(Ho,cr),T(t,En,f),T(t,M,f),v(_t,M,null),e(M,pr),e(M,Tt),e(Tt,ur),e(Tt,Yo),e(Yo,hr),e(Tt,mr),e(M,gr),e(M,bt),e(bt,fr),e(bt,kt),e(kt,_r),e(bt,Tr),e(M,br),e(M,yt),e(yt,kr),e(yt,ao),e(ao,yr),e(yt,vr),e(M,wr),e(M,vt),e(vt,$r),e(vt,wt),e(wt,xr),e(vt,zr),e(M,Lr),e(M,B),v($t,B,null),e(B,qr),e(B,re),e(re,jr),e(re,so),e(so,Fr),e(re,Mr),e(re,Ro),e(Ro,Er),e(re,Cr),e(B,Or),v(ve,B,null),e(B,Pr),v(we,B,null),T(t,Cn,f),T(t,de,f),e(de,$e),e($e,Jo),v(xt,Jo,null),e(de,Gr),e(de,Vo),e(Vo,Sr),T(t,On,f),T(t,E,f),v(zt,E,null),e(E,Ar),e(E,Ko),e(Ko,Nr),e(E,Ir),e(E,Lt),e(Lt,Dr),e(Lt,qt),e(qt,Br),e(Lt,Wr),e(E,Ur),e(E,jt),e(jt,Hr),e(jt,ro),e(ro,Yr),e(jt,Rr),e(E,Jr),e(E,Ft),e(Ft,Vr),e(Ft,Mt),e(Mt,Kr),e(Ft,Zr),e(E,Qr),e(E,W),v(Et,W,null),e(W,Xr),e(W,ie),e(ie,ed),e(ie,io),e(io,td),e(ie,od),e(ie,Zo),e(Zo,nd),e(ie,ad),e(W,sd),v(xe,W,null),e(W,rd),v(ze,W,null),T(t,Pn,f),T(t,le,f),e(le,Le),e(Le,Qo),v(Ct,Qo,null),e(le,dd),e(le,Xo),e(Xo,id),T(t,Gn,f),T(t,A,f),v(Ot,A,null),e(A,ld),e(A,U),v(Pt,U,null),e(U,cd),e(U,ce),e(ce,pd),e(ce,en),e(en,ud),e(ce,hd),e(ce,tn),e(tn,md),e(ce,gd),e(U,fd),v(qe,U,null),e(U,_d),v(je,U,null),e(A,Td),e(A,Fe),v(Gt,Fe,null),e(Fe,bd),v(Me,Fe,null),e(A,kd),e(A,Ee),v(St,Ee,null),e(Ee,yd),v(Ce,Ee,null),T(t,Sn,f),T(t,pe,f),e(pe,Oe),e(Oe,on),v(At,on,null),e(pe,vd),e(pe,nn),e(nn,wd),T(t,An,f),T(t,N,f),v(Nt,N,null),e(N,$d),e(N,H),v(It,H,null),e(H,xd),e(H,ue),e(ue,zd),e(ue,an),e(an,Ld),e(ue,qd),e(ue,sn),e(sn,jd),e(ue,Fd),e(H,Md),v(Pe,H,null),e(H,Ed),v(Ge,H,null),e(N,Cd),e(N,Se),v(Dt,Se,null),e(Se,Od),v(Ae,Se,null),e(N,Pd),e(N,Ne),v(Bt,Ne,null),e(Ne,Gd),v(Ie,Ne,null),Nn=!0},p(t,[f]){const 
Wt={};f&2&&(Wt.$$scope={dirty:f,ctx:t}),be.$set(Wt);const rn={};f&2&&(rn.$$scope={dirty:f,ctx:t}),ke.$set(rn);const dn={};f&2&&(dn.$$scope={dirty:f,ctx:t}),ve.$set(dn);const ln={};f&2&&(ln.$$scope={dirty:f,ctx:t}),we.$set(ln);const Ut={};f&2&&(Ut.$$scope={dirty:f,ctx:t}),xe.$set(Ut);const cn={};f&2&&(cn.$$scope={dirty:f,ctx:t}),ze.$set(cn);const pn={};f&2&&(pn.$$scope={dirty:f,ctx:t}),qe.$set(pn);const un={};f&2&&(un.$$scope={dirty:f,ctx:t}),je.$set(un);const Ht={};f&2&&(Ht.$$scope={dirty:f,ctx:t}),Me.$set(Ht);const hn={};f&2&&(hn.$$scope={dirty:f,ctx:t}),Ce.$set(hn);const mn={};f&2&&(mn.$$scope={dirty:f,ctx:t}),Pe.$set(mn);const gn={};f&2&&(gn.$$scope={dirty:f,ctx:t}),Ge.$set(gn);const fn={};f&2&&(fn.$$scope={dirty:f,ctx:t}),Ae.$set(fn);const _n={};f&2&&(_n.$$scope={dirty:f,ctx:t}),Ie.$set(_n)},i(t){Nn||(w(n.$$.fragment,t),w(Ke.$$.fragment,t),w(ot.$$.fragment,t),w(st.$$.fragment,t),w(rt.$$.fragment,t),w(it.$$.fragment,t),w(lt.$$.fragment,t),w(gt.$$.fragment,t),w(be.$$.fragment,t),w(ke.$$.fragment,t),w(ft.$$.fragment,t),w(_t.$$.fragment,t),w($t.$$.fragment,t),w(ve.$$.fragment,t),w(we.$$.fragment,t),w(xt.$$.fragment,t),w(zt.$$.fragment,t),w(Et.$$.fragment,t),w(xe.$$.fragment,t),w(ze.$$.fragment,t),w(Ct.$$.fragment,t),w(Ot.$$.fragment,t),w(Pt.$$.fragment,t),w(qe.$$.fragment,t),w(je.$$.fragment,t),w(Gt.$$.fragment,t),w(Me.$$.fragment,t),w(St.$$.fragment,t),w(Ce.$$.fragment,t),w(At.$$.fragment,t),w(Nt.$$.fragment,t),w(It.$$.fragment,t),w(Pe.$$.fragment,t),w(Ge.$$.fragment,t),w(Dt.$$.fragment,t),w(Ae.$$.fragment,t),w(Bt.$$.fragment,t),w(Ie.$$.fragment,t),Nn=!0)},o(t){$(n.$$.fragment,t),$(Ke.$$.fragment,t),$(ot.$$.fragment,t),$(st.$$.fragment,t),$(rt.$$.fragment,t),$(it.$$.fragment,t),$(lt.$$.fragment,t),$(gt.$$.fragment,t),$(be.$$.fragment,t),$(ke.$$.fragment,t),$(ft.$$.fragment,t),$(_t.$$.fragment,t),$($t.$$.fragment,t),$(ve.$$.fragment,t),$(we.$$.fragment,t),$(xt.$$.fragment,t),$(zt.$$.fragment,t),$(Et.$$.fragment,t),$(xe.$$.fragment,t),$(ze.$$.fragment,t),$(Ct.$$.fragment,t),$(Ot.$$.fragment,t),$(Pt.$$.fragment,t),$(qe.$$.fragment,t),$(je.$$.fragment,t),$(Gt.$$.fragment,t),$(Me.$$.fragment,t),$(St.$$.fragment,t),$(Ce.$$.fragment,t),$(At.$$.fragment,t),$(Nt.$$.fragment,t),$(It.$$.fragment,t),$(Pe.$$.fragment,t),$(Ge.$$.fragment,t),$(Dt.$$.fragment,t),$(Ae.$$.fragment,t),$(Bt.$$.fragment,t),$(Ie.$$.fragment,t),Nn=!1},d(t){o(l),t&&o(b),t&&o(m),x(n),t&&o(bn),t&&o(ee),x(Ke),t&&o(kn),t&&o(fe),t&&o(yn),t&&o(Yt),t&&o(vn),t&&o(Rt),t&&o(wn),t&&o(Jt),t&&o($n),t&&o(C),t&&o(xn),x(ot,t),t&&o(zn),t&&o(Q),t&&o(Ln),t&&o(te),x(st),t&&o(qn),t&&o(K),x(rt),t&&o(jn),t&&o(ne),x(it),t&&o(Fn),t&&o(F),x(lt),x(gt),x(be),x(ke),t&&o(Mn),t&&o(se),x(ft),t&&o(En),t&&o(M),x(_t),x($t),x(ve),x(we),t&&o(Cn),t&&o(de),x(xt),t&&o(On),t&&o(E),x(zt),x(Et),x(xe),x(ze),t&&o(Pn),t&&o(le),x(Ct),t&&o(Gn),t&&o(A),x(Ot),x(Pt),x(qe),x(je),x(Gt),x(Me),x(St),x(Ce),t&&o(Sn),t&&o(pe),x(At),t&&o(An),t&&o(N),x(Nt),x(It),x(Pe),x(Ge),x(Dt),x(Ae),x(Bt),x(Ie)}}}const Tl={local:"longt5",sections:[{local:"overview",title:"Overview"},{local:"transformers.LongT5Config",title:"LongT5Config"},{local:"transformers.LongT5Model",title:"LongT5Model"},{local:"transformers.LongT5ForConditionalGeneration",title:"LongT5ForConditionalGeneration"},{local:"transformers.LongT5EncoderModel",title:"LongT5EncoderModel"},{local:"transformers.FlaxLongT5Model",title:"FlaxLongT5Model"},{local:"transformers.FlaxLongT5ForConditionalGeneration",title:"FlaxLongT5ForConditionalGeneration"}],title:"LongT5"};function bl(z){return ol(()=>{new 
URLSearchParams(window.location.search).get("fw")}),[]}class zl extends Qi{constructor(l){super();Xi(this,l,bl,_l,el,{})}}export{zl as default,Tl as metadata};
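/*
 * Minimal usage sketch for the LongT5 classes documented in this page. It assumes the
 * `google/long-t5-local-base` checkpoint linked above and the standard `transformers`
 * Python API (`AutoTokenizer`, `LongT5ForConditionalGeneration`). That checkpoint is
 * pre-trained only, so the decoded text illustrates the call pattern rather than a
 * reference summary.
 *
 *     from transformers import AutoTokenizer, LongT5ForConditionalGeneration
 *
 *     tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
 *     model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
 *
 *     # Local attention lets the encoder handle long inputs; a repeated string stands in
 *     # for a long document here.
 *     long_document = "LongT5 extends T5 with efficient attention for long sequences. " * 50
 *     inputs = tokenizer(long_document, return_tensors="pt")
 *
 *     summary_ids = model.generate(inputs.input_ids, max_length=64)
 *     print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
 */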
4
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/opt.mdx-hf-doc-builder.js
import{S as Nl,i as Il,s as Sl,e as n,k as c,w as b,t as r,M as Dl,c as s,d as t,m as p,a,x as k,h as i,b as m,G as e,g,y as w,q as $,o as P,B as O,v as Wl,L as et}from"../../chunks/vendor-hf-doc-builder.js";import{T as rs}from"../../chunks/Tip-hf-doc-builder.js";import{D as ue}from"../../chunks/Docstring-hf-doc-builder.js";import{C as tt}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as it}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Qe}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Gl(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import OPTModel, OPTConfig # Initializing a OPT facebook/opt-large style configuration configuration = OPTConfig() # Initializing a model from the facebook/opt-large style configuration model = OPTModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> OPTModel, OPTConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a OPT facebook/opt-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = OPTConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/opt-large style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Bl(M){let d,T,h,f,y;return{c(){d=n("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),f=r("Module"),y=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=s(u,"CODE",{});var E=a(h);f=i(E,"Module"),E.forEach(t),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(l,u){g(l,d,u),e(d,T),e(d,h),e(h,f),e(d,y)},d(l){l&&t(d)}}}function Hl(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, OPTModel import torch tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") model = OPTModel.from_pretrained("facebook/opt-350m") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, OPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = OPTModel.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Ul(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, OPTForCausalLM model = OPTForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") prompt = "Hey, are you consciours? Can you talk to me?" inputs = tokenizer(prompt, return_tensors="pt") # Generate generate_ids = model.generate(inputs.input_ids, max_length=30) tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, OPTForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Hey, are you consciours? Can you talk to me?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Generate</span> <span class="hljs-meta">&gt;&gt;&gt; </span>generate_ids = model.generate(inputs.input_ids, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generate_ids, skip_special_tokens=<span class="hljs-literal">True</span>, clean_up_tokenization_spaces=<span class="hljs-literal">False</span>)[<span class="hljs-number">0</span>] <span class="hljs-string">&quot;Hey, are you consciours? 
Can you talk to me?\\nI&#x27;m not consciours, but I can talk to you.&quot;</span>`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Kl(M){let d,T,h,f,y,l,u,E,je,Oe,L,oe,se,F,Le,X,Ae,Fe,W,Ne,ae,re,Ie,Me,V,Se,xe,Z,Te,De,he,C,A,ze,G,ie,We,Y,Ge,Be,N,I,le,ye,He,de,ve,Ue,Q,be,ce,Ke,ne,S,Ve,B,j,Ze;return{c(){d=n("p"),T=r("TensorFlow models and layers in "),h=n("code"),f=r("transformers"),y=r(" accept two formats as input:"),l=c(),u=n("ul"),E=n("li"),je=r("having all inputs as keyword arguments (like PyTorch models), or"),Oe=c(),L=n("li"),oe=r("having all inputs as a list, tuple or dict in the first positional argument."),se=c(),F=n("p"),Le=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),X=n("code"),Ae=r("model.fit()"),Fe=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),W=n("code"),Ne=r("model.fit()"),ae=r(` supports! If, however, you want to use the second format outside of Keras methods like `),re=n("code"),Ie=r("fit()"),Me=r(" and "),V=n("code"),Se=r("predict()"),xe=r(`, such as when creating your own layers or models with the Keras `),Z=n("code"),Te=r("Functional"),De=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),he=c(),C=n("ul"),A=n("li"),ze=r("a single Tensor with "),G=n("code"),ie=r("input_ids"),We=r(" only and nothing else: "),Y=n("code"),Ge=r("model(input_ids)"),Be=c(),N=n("li"),I=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),le=n("code"),ye=r("model([input_ids, attention_mask])"),He=r(" or "),de=n("code"),ve=r("model([input_ids, attention_mask, token_type_ids])"),Ue=c(),Q=n("li"),be=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ce=n("code"),Ke=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ne=c(),S=n("p"),Ve=r(`Note that when creating models and layers with `),B=n("a"),j=r("subclassing"),Ze=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(v){d=s(v,"P",{});var x=a(d);T=i(x,"TensorFlow models and layers in "),h=s(x,"CODE",{});var lt=a(h);f=i(lt,"transformers"),lt.forEach(t),y=i(x," accept two formats as input:"),x.forEach(t),l=p(v),u=s(v,"UL",{});var ee=a(u);E=s(ee,"LI",{});var dt=a(E);je=i(dt,"having all inputs as keyword arguments (like PyTorch models), or"),dt.forEach(t),Oe=p(ee),L=s(ee,"LI",{});var ct=a(L);oe=i(ct,"having all inputs as a list, tuple or dict in the first positional argument."),ct.forEach(t),ee.forEach(t),se=p(v),F=s(v,"P",{});var z=a(F);Le=i(z,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),X=s(z,"CODE",{});var pt=a(X);Ae=i(pt,"model.fit()"),pt.forEach(t),Fe=i(z,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),W=s(z,"CODE",{});var ut=a(W);Ne=i(ut,"model.fit()"),ut.forEach(t),ae=i(z,` supports! 
If, however, you want to use the second format outside of Keras methods like `),re=s(z,"CODE",{});var ke=a(re);Ie=i(ke,"fit()"),ke.forEach(t),Me=i(z," and "),V=s(z,"CODE",{});var ht=a(V);Se=i(ht,"predict()"),ht.forEach(t),xe=i(z,`, such as when creating your own layers or models with the Keras `),Z=s(z,"CODE",{});var ft=a(Z);Te=i(ft,"Functional"),ft.forEach(t),De=i(z,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),z.forEach(t),he=p(v),C=s(v,"UL",{});var R=a(C);A=s(R,"LI",{});var D=a(A);ze=i(D,"a single Tensor with "),G=s(D,"CODE",{});var fe=a(G);ie=i(fe,"input_ids"),fe.forEach(t),We=i(D," only and nothing else: "),Y=s(D,"CODE",{});var Re=a(Y);Ge=i(Re,"model(input_ids)"),Re.forEach(t),D.forEach(t),Be=p(R),N=s(R,"LI",{});var H=a(N);I=i(H,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),le=s(H,"CODE",{});var mt=a(le);ye=i(mt,"model([input_ids, attention_mask])"),mt.forEach(t),He=i(H," or "),de=s(H,"CODE",{});var Je=a(de);ve=i(Je,"model([input_ids, attention_mask, token_type_ids])"),Je.forEach(t),H.forEach(t),Ue=p(R),Q=s(R,"LI",{});var Xe=a(Q);be=i(Xe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ce=s(Xe,"CODE",{});var ot=a(ce);Ke=i(ot,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ot.forEach(t),Xe.forEach(t),R.forEach(t),ne=p(v),S=s(v,"P",{});var q=a(S);Ve=i(q,`Note that when creating models and layers with `),B=s(q,"A",{href:!0,rel:!0});var we=a(B);j=i(we,"subclassing"),we.forEach(t),Ze=i(q,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),q.forEach(t),this.h()},h(){m(B,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),m(B,"rel","nofollow")},m(v,x){g(v,d,x),e(d,T),e(d,h),e(h,f),e(d,y),g(v,l,x),g(v,u,x),e(u,E),e(E,je),e(u,Oe),e(u,L),e(L,oe),g(v,se,x),g(v,F,x),e(F,Le),e(F,X),e(X,Ae),e(F,Fe),e(F,W),e(W,Ne),e(F,ae),e(F,re),e(re,Ie),e(F,Me),e(F,V),e(V,Se),e(F,xe),e(F,Z),e(Z,Te),e(F,De),g(v,he,x),g(v,C,x),e(C,A),e(A,ze),e(A,G),e(G,ie),e(A,We),e(A,Y),e(Y,Ge),e(C,Be),e(C,N),e(N,I),e(N,le),e(le,ye),e(N,He),e(N,de),e(de,ve),e(C,Ue),e(C,Q),e(Q,be),e(Q,ce),e(ce,Ke),g(v,ne,x),g(v,S,x),e(S,Ve),e(S,B),e(B,j),e(S,Ze)},d(v){v&&t(d),v&&t(l),v&&t(u),v&&t(se),v&&t(F),v&&t(he),v&&t(C),v&&t(ne),v&&t(S)}}}function Vl(M){let d,T,h,f,y;return{c(){d=n("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),f=r("Module"),y=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=s(u,"CODE",{});var E=a(h);f=i(E,"Module"),E.forEach(t),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(l,u){g(l,d,u),e(d,T),e(d,h),e(h,f),e(d,y)},d(l){l&&t(d)}}}function Zl(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, TFOPTModel import tensorflow as tf tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") model = TFOPTModel.from_pretrained("facebook/opt-350m") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) 
last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFOPTModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOPTModel.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Rl(M){let d,T,h,f,y,l,u,E,je,Oe,L,oe,se,F,Le,X,Ae,Fe,W,Ne,ae,re,Ie,Me,V,Se,xe,Z,Te,De,he,C,A,ze,G,ie,We,Y,Ge,Be,N,I,le,ye,He,de,ve,Ue,Q,be,ce,Ke,ne,S,Ve,B,j,Ze;return{c(){d=n("p"),T=r("TensorFlow models and layers in "),h=n("code"),f=r("transformers"),y=r(" accept two formats as input:"),l=c(),u=n("ul"),E=n("li"),je=r("having all inputs as keyword arguments (like PyTorch models), or"),Oe=c(),L=n("li"),oe=r("having all inputs as a list, tuple or dict in the first positional argument."),se=c(),F=n("p"),Le=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),X=n("code"),Ae=r("model.fit()"),Fe=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),W=n("code"),Ne=r("model.fit()"),ae=r(` supports! 
If, however, you want to use the second format outside of Keras methods like `),re=n("code"),Ie=r("fit()"),Me=r(" and "),V=n("code"),Se=r("predict()"),xe=r(`, such as when creating your own layers or models with the Keras `),Z=n("code"),Te=r("Functional"),De=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),he=c(),C=n("ul"),A=n("li"),ze=r("a single Tensor with "),G=n("code"),ie=r("input_ids"),We=r(" only and nothing else: "),Y=n("code"),Ge=r("model(input_ids)"),Be=c(),N=n("li"),I=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),le=n("code"),ye=r("model([input_ids, attention_mask])"),He=r(" or "),de=n("code"),ve=r("model([input_ids, attention_mask, token_type_ids])"),Ue=c(),Q=n("li"),be=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ce=n("code"),Ke=r('model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ne=c(),S=n("p"),Ve=r(`Note that when creating models and layers with `),B=n("a"),j=r("subclassing"),Ze=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(v){d=s(v,"P",{});var x=a(d);T=i(x,"TensorFlow models and layers in "),h=s(x,"CODE",{});var lt=a(h);f=i(lt,"transformers"),lt.forEach(t),y=i(x," accept two formats as input:"),x.forEach(t),l=p(v),u=s(v,"UL",{});var ee=a(u);E=s(ee,"LI",{});var dt=a(E);je=i(dt,"having all inputs as keyword arguments (like PyTorch models), or"),dt.forEach(t),Oe=p(ee),L=s(ee,"LI",{});var ct=a(L);oe=i(ct,"having all inputs as a list, tuple or dict in the first positional argument."),ct.forEach(t),ee.forEach(t),se=p(v),F=s(v,"P",{});var z=a(F);Le=i(z,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),X=s(z,"CODE",{});var pt=a(X);Ae=i(pt,"model.fit()"),pt.forEach(t),Fe=i(z,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),W=s(z,"CODE",{});var ut=a(W);Ne=i(ut,"model.fit()"),ut.forEach(t),ae=i(z,` supports! 
If, however, you want to use the second format outside of Keras methods like `),re=s(z,"CODE",{});var ke=a(re);Ie=i(ke,"fit()"),ke.forEach(t),Me=i(z," and "),V=s(z,"CODE",{});var ht=a(V);Se=i(ht,"predict()"),ht.forEach(t),xe=i(z,`, such as when creating your own layers or models with the Keras `),Z=s(z,"CODE",{});var ft=a(Z);Te=i(ft,"Functional"),ft.forEach(t),De=i(z,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),z.forEach(t),he=p(v),C=s(v,"UL",{});var R=a(C);A=s(R,"LI",{});var D=a(A);ze=i(D,"a single Tensor with "),G=s(D,"CODE",{});var fe=a(G);ie=i(fe,"input_ids"),fe.forEach(t),We=i(D," only and nothing else: "),Y=s(D,"CODE",{});var Re=a(Y);Ge=i(Re,"model(input_ids)"),Re.forEach(t),D.forEach(t),Be=p(R),N=s(R,"LI",{});var H=a(N);I=i(H,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),le=s(H,"CODE",{});var mt=a(le);ye=i(mt,"model([input_ids, attention_mask])"),mt.forEach(t),He=i(H," or "),de=s(H,"CODE",{});var Je=a(de);ve=i(Je,"model([input_ids, attention_mask, token_type_ids])"),Je.forEach(t),H.forEach(t),Ue=p(R),Q=s(R,"LI",{});var Xe=a(Q);be=i(Xe,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),ce=s(Xe,"CODE",{});var ot=a(ce);Ke=i(ot,'model({"input_ids": input_ids, "token_type_ids": token_type_ids})'),ot.forEach(t),Xe.forEach(t),R.forEach(t),ne=p(v),S=s(v,"P",{});var q=a(S);Ve=i(q,`Note that when creating models and layers with `),B=s(q,"A",{href:!0,rel:!0});var we=a(B);j=i(we,"subclassing"),we.forEach(t),Ze=i(q,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),q.forEach(t),this.h()},h(){m(B,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),m(B,"rel","nofollow")},m(v,x){g(v,d,x),e(d,T),e(d,h),e(h,f),e(d,y),g(v,l,x),g(v,u,x),e(u,E),e(E,je),e(u,Oe),e(u,L),e(L,oe),g(v,se,x),g(v,F,x),e(F,Le),e(F,X),e(X,Ae),e(F,Fe),e(F,W),e(W,Ne),e(F,ae),e(F,re),e(re,Ie),e(F,Me),e(F,V),e(V,Se),e(F,xe),e(F,Z),e(Z,Te),e(F,De),g(v,he,x),g(v,C,x),e(C,A),e(A,ze),e(A,G),e(G,ie),e(A,We),e(A,Y),e(Y,Ge),e(C,Be),e(C,N),e(N,I),e(N,le),e(le,ye),e(N,He),e(N,de),e(de,ve),e(C,Ue),e(C,Q),e(Q,be),e(Q,ce),e(ce,Ke),g(v,ne,x),g(v,S,x),e(S,Ve),e(S,B),e(B,j),e(S,Ze)},d(v){v&&t(d),v&&t(l),v&&t(u),v&&t(se),v&&t(F),v&&t(he),v&&t(C),v&&t(ne),v&&t(S)}}}function Jl(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, TFOPTForCausalLM import tensorflow as tf tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, TFOPTForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFOPTForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is 
cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Xl(M){let d,T,h,f,y;return{c(){d=n("p"),T=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),h=n("code"),f=r("Module"),y=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Although the recipe for forward pass needs to be defined within this function, one should call the "),h=s(u,"CODE",{});var E=a(h);f=i(E,"Module"),E.forEach(t),y=i(u,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),u.forEach(t)},m(l,u){g(l,d,u),e(d,T),e(d,h),e(h,f),e(d,y)},d(l){l&&t(d)}}}function Yl(M){let d,T,h,f,y;return f=new tt({props:{code:`import torch from transformers import GPT2Tokenizer, OPTForSequenceClassification tokenizer = GPT2Tokenizer.from_pretrained("ArthurZ/opt-350m-dummy-sc") model = OPTForSequenceClassification.from_pretrained("ArthurZ/opt-350m-dummy-sc") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax().item() model.config.id2label[predicted_class_id]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, OPTForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_id = logits.argmax().item() <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.id2label[predicted_class_id] <span class="hljs-string">&#x27;LABEL_0&#x27;</span>`}}),{c(){d=n("p"),T=r("Example of single-label classification:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example of single-label classification:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function Ql(M){let d,T;return d=new tt({props:{code:'# To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`\nnum_labels = len(model.config.id2label)\nmodel = OPTForSequenceClassification.from_pretrained("ArthurZ/opt-350m-dummy-sc", num_labels=num_labels)\n\nlabels = torch.tensor([1])\nloss = model(**inputs, labels=labels).loss\nround(loss.item(), 2)',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\`</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(model.config.id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>, num_labels=num_labels) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">1.71</span>`}}),{c(){b(d.$$.fragment)},l(h){k(d.$$.fragment,h)},m(h,f){w(d,h,f),T=!0},p:et,i(h){T||($(d.$$.fragment,h),T=!0)},o(h){P(d.$$.fragment,h),T=!1},d(h){O(d,h)}}}function ed(M){let d,T,h,f,y;return f=new tt({props:{code:`import torch from transformers import GPT2Tokenizer, OPTForSequenceClassification tokenizer = GPT2Tokenizer.from_pretrained("ArthurZ/opt-350m-dummy-sc") model = OPTForSequenceClassification.from_pretrained("ArthurZ/opt-350m-dummy-sc", problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax().item() model.config.id2label[predicted_class_id]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, OPTForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_id = logits.argmax().item() <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.id2label[predicted_class_id] <span class="hljs-string">&#x27;LABEL_0&#x27;</span>`}}),{c(){d=n("p"),T=r("Example of multi-label classification:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example of multi-label classification:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function td(M){let d,T;return d=new tt({props:{code:`# To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\` num_labels = len(model.config.id2label) model = OPTForSequenceClassification.from_pretrained( "ArthurZ/opt-350m-dummy-sc", num_labels=num_labels, problem_type="multi_label_classification" ) labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to( torch.float ) loss = model(**inputs, labels=labels).loss loss.backward()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\`</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(model.config.id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = OPTForSequenceClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;ArthurZ/opt-350m-dummy-sc&quot;</span>, num_labels=num_labels, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to( <span class="hljs-meta">... </span> torch.<span class="hljs-built_in">float</span> <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()`}}),{c(){b(d.$$.fragment)},l(h){k(d.$$.fragment,h)},m(h,f){w(d,h,f),T=!0},p:et,i(h){T||($(d.$$.fragment,h),T=!0)},o(h){P(d.$$.fragment,h),T=!1},d(h){O(d,h)}}}function od(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, FlaxOPTModel tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") model = FlaxOPTModel.from_pretrained("facebook/opt-350m") inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxOPTModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxOPTModel.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;jax&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function nd(M){let d,T,h,f,y;return f=new tt({props:{code:`from transformers import GPT2Tokenizer, FlaxOPTForCausalLM tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") model = FlaxOPTForCausalLM.from_pretrained("facebook/opt-350m") inputs = tokenizer("Hello, my dog is cute", return_tensors="np") outputs = model(**inputs) # retrieve logts for next token next_token_logits = outputs.logits[:, -1]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, FlaxOPTForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxOPTForCausalLM.from_pretrained(<span class="hljs-string">&quot;facebook/opt-350m&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve logts for next token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>next_token_logits = outputs.logits[:, -<span class="hljs-number">1</span>]`}}),{c(){d=n("p"),T=r("Example:"),h=c(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var u=a(d);T=i(u,"Example:"),u.forEach(t),h=p(l),k(f.$$.fragment,l)},m(l,u){g(l,d,u),e(d,T),g(l,h,u),w(f,l,u),y=!0},p:et,i(l){y||($(f.$$.fragment,l),y=!0)},o(l){P(f.$$.fragment,l),y=!1},d(l){l&&t(d),l&&t(h),O(f,l)}}}function sd(M){let 
d,T,h,f,y,l,u,E,je,Oe,L,oe,se,F,Le,X,Ae,Fe,W,Ne,ae,re,Ie,Me,V,Se,xe,Z,Te,De,he,C,A,ze,G,ie,We,Y,Ge,Be,N,I,le,ye,He,de,ve,Ue,Q,be,ce,Ke,ne,S,Ve,B,j,Ze,v,x,lt,ee,dt,ct,z,pt,ut,ke,ht,ft,R,D,fe,Re,H,mt,Je,Xe,ot,q,we,Ks,gt,Vs,tn,Zs,Rs,lo,Js,Xs,Ys,_t,Qs,on,ea,ta,nn,oa,na,sa,zt,is,Tt,Ct,vn,co,aa,bn,ra,ls,$e,po,ia,uo,la,sn,da,ca,pa,ho,ua,fo,ha,fa,ma,Ce,mo,ga,yt,_a,an,Ta,ya,kn,va,ba,ka,Et,wa,qt,ds,vt,jt,wn,go,$a,$n,Pa,cs,bt,_o,Oa,Lt,To,Fa,At,ps,kt,Nt,Pn,yo,Ma,On,xa,us,pe,vo,za,bo,Ca,rn,Ea,qa,ja,ko,La,wo,Aa,Na,Ia,It,Sa,Ee,$o,Da,wt,Wa,ln,Ga,Ba,Fn,Ha,Ua,Ka,St,Va,Dt,hs,$t,Wt,Mn,Po,Za,xn,Ra,fs,te,Oo,Ja,zn,Xa,Ya,Fo,Qa,dn,er,tr,or,Mo,nr,xo,sr,ar,rr,Gt,ir,Bt,zo,lr,Ht,ms,Pt,Ut,Cn,Co,dr,En,cr,gs,U,Eo,pr,qn,ur,hr,cn,pn,fr,mr,gr,Pe,_r,jn,Tr,yr,Ln,vr,br,An,kr,wr,Nn,$r,Pr,Or,qo,Fr,un,Mr,xr,zr,jo,Cr,Lo,Er,qr,jr,J,Ao,Lr,Ot,Ar,hn,Nr,Ir,In,Sr,Dr,Wr,Kt,Gr,Vt,Br,Zt,Hr,Rt,Ur,Jt,_s,Ft,Xt,Sn,No,Kr,Dn,Vr,Ts,Mt,Io,Zr,Yt,So,Rr,Qt,ys,xt,eo,Wn,Do,Jr,Gn,Xr,vs,K,Wo,Yr,Bn,Qr,ei,Go,ti,fn,oi,ni,si,Bo,ai,Ho,ri,ii,li,Hn,di,ci,Ye,Un,Uo,pi,ui,Kn,Ko,hi,fi,Vn,Vo,mi,gi,Zn,Zo,_i,Ti,to,Ro,yi,oo,bs;return l=new it({}),F=new it({}),H=new it({}),we=new ue({props:{name:"class transformers.OPTConfig",anchor:"transformers.OPTConfig",parameters:[{name:"vocab_size",val:" = 50272"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"ffn_dim",val:" = 3072"},{name:"max_position_embeddings",val:" = 2048"},{name:"do_layer_norm_before",val:" = True"},{name:"_remove_final_layer_norm",val:" = False"},{name:"word_embed_proj_dim",val:" = None"},{name:"dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.0"},{name:"num_attention_heads",val:" = 12"},{name:"activation_function",val:" = 'relu'"},{name:"layerdrop",val:" = 0.0"},{name:"init_std",val:" = 0.02"},{name:"use_cache",val:" = True"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 2"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.OPTConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 50272) &#x2014; Vocabulary size of the OPT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTModel">OPTModel</a>`,name:"vocab_size"},{anchor:"transformers.OPTConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.OPTConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of decoder layers.`,name:"num_hidden_layers"},{anchor:"transformers.OPTConfig.ffn_dim",description:`<strong>ffn_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in decoder.`,name:"ffn_dim"},{anchor:"transformers.OPTConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer decoder.`,name:"num_attention_heads"},{anchor:"transformers.OPTConfig.activation_function",description:`<strong>activation_function</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;relu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"activation_function"},{anchor:"transformers.OPTConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.OPTConfig.do_layer_norm_before",description:`<strong>do_layer_norm_before</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to perform layer normalization before the attention block.`,name:"do_layer_norm_before"},{anchor:"transformers.OPTConfig.word_embed_proj_dim",description:`<strong>word_embed_proj_dim</strong> (<code>int</code>, <em>optional</em>) &#x2014; <code>word_embed_proj_dim</code> can be set to down-project word embeddings, <em>e.g.</em> <code>opt-350m</code>. Defaults to <code>hidden_size</code>.`,name:"word_embed_proj_dim"},{anchor:"transformers.OPTConfig.dropout",description:`<strong>dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"dropout"},{anchor:"transformers.OPTConfig.attention_dropout",description:`<strong>attention_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities. layerdrop &#x2014; (<code>float</code>, <em>optional</em>, defaults to 0.0): The LayerDrop probability. 
See the [LayerDrop paper](see <a href="https://arxiv.org/abs/1909.11556" rel="nofollow">https://arxiv.org/abs/1909.11556</a>) for more details.`,name:"attention_dropout"},{anchor:"transformers.OPTConfig.init_std",description:`<strong>init_std</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"init_std"},{anchor:"transformers.OPTConfig.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return the last key/values attentions (not used by all models).`,name:"use_cache"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/configuration_opt.py#L32"}}),zt=new Qe({props:{anchor:"transformers.OPTConfig.example",$$slots:{default:[Gl]},$$scope:{ctx:M}}}),co=new it({}),po=new ue({props:{name:"class transformers.OPTModel",anchor:"transformers.OPTModel",parameters:[{name:"config",val:": OPTConfig"}],parametersDescription:[{anchor:"transformers.OPTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig">OPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L731"}}),mo=new ue({props:{name:"forward",anchor:"transformers.OPTModel.forward",parameters:[{name:"input_ids",val:": LongTensor = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.OPTModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OPTModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <p>Indices can be obtained using <code>OPTTokenizer</code>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>If you want to change padding behavior, you should read <code>modeling_opt._prepare_decoder_attention_mask</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"attention_mask"},{anchor:"transformers.OPTModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OPTModel.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.OPTModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OPTModel.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.OPTModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OPTModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OPTModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L747",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPast" >transformers.modeling_outputs.BaseModelOutputWithPast</a> or 
<code>tuple(torch.FloatTensor)</code></p> `}}),Et=new rs({props:{$$slots:{default:[Bl]},$$scope:{ctx:M}}}),qt=new Qe({props:{anchor:"transformers.OPTModel.forward.example",$$slots:{default:[Hl]},$$scope:{ctx:M}}}),go=new it({}),_o=new ue({props:{name:"class transformers.OPTForCausalLM",anchor:"transformers.OPTForCausalLM",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L799"}}),To=new ue({props:{name:"forward",anchor:"transformers.OPTForCausalLM.forward",parameters:[{name:"input_ids",val:": LongTensor = None"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"head_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.List[torch.FloatTensor]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.OPTForCausalLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <code>OPTTokenizer</code>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OPTForCausalLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.OPTForCausalLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_hidden_layers, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OPTForCausalLM.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.OPTForCausalLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OPTForCausalLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.OPTForCausalLM.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.OPTForCausalLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OPTForCausalLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OPTForCausalLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L830",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutputWithPast" >transformers.modeling_outputs.CausalLMOutputWithPast</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),At=new Qe({props:{anchor:"transformers.OPTForCausalLM.forward.example",$$slots:{default:[Ul]},$$scope:{ctx:M}}}),yo=new 
it({}),vo=new ue({props:{name:"class transformers.TFOPTModel",anchor:"transformers.TFOPTModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFOPTModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig">OPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_tf_opt.py#L785"}}),It=new rs({props:{$$slots:{default:[Kl]},$$scope:{ctx:M}}}),$o=new ue({props:{name:"call",anchor:"transformers.TFOPTModel.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFOPTModel.call.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOPTModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOPTModel.call.head_mask",description:`<strong>head_mask</strong> (<code>tf.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOPTModel.call.past_key_values",description:`<strong>past_key_values</strong> (<code>Tuple[Tuple[tf.Tensor]]</code> of length <code>config.n_layers</code>) &#x2014; contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFOPTModel.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>). Set to <code>False</code> during training, <code>True</code> during generation`,name:"use_cache"},{anchor:"transformers.TFOPTModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFOPTModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFOPTModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFOPTModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_tf_opt.py#L799",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" >transformers.modeling_tf_outputs.TFBaseModelOutputWithPast</a> or <code>tuple(tf.Tensor)</code></p> `}}),St=new rs({props:{$$slots:{default:[Vl]},$$scope:{ctx:M}}}),Dt=new Qe({props:{anchor:"transformers.TFOPTModel.call.example",$$slots:{default:[Zl]},$$scope:{ctx:M}}}),Po=new it({}),Oo=new ue({props:{name:"class 
transformers.TFOPTForCausalLM",anchor:"transformers.TFOPTForCausalLM",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFOPTForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig">OPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_tf_opt.py#L873"}}),Gt=new rs({props:{$$slots:{default:[Rl]},$$scope:{ctx:M}}}),zo=new ue({props:{name:"call",anchor:"transformers.TFOPTForCausalLM.call",parameters:[{name:"input_ids",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"past_key_values",val:": typing.Union[typing.Tuple[typing.Tuple[typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor]]], NoneType] = None"},{name:"attention_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"position_ids",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"inputs_embeds",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"labels",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFOPTForCausalLM.call.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <code>OPTTokenizer</code>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFOPTForCausalLM.call.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFOPTForCausalLM.call.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(num_hidden_layers, num_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFOPTForCausalLM.call.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.TFOPTForCausalLM.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFOPTForCausalLM.call.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. 
Indices should either be in <code>[0, ..., config.vocab_size]</code> or -100 (see <code>input_ids</code> docstring). Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>.`,name:"labels"},{anchor:"transformers.TFOPTForCausalLM.call.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.TFOPTForCausalLM.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.TFOPTForCausalLM.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.TFOPTForCausalLM.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_tf_opt.py#L898",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the 
output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or <code>tuple(tf.Tensor)</code>: A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" >transformers.modeling_tf_outputs.TFCausalLMOutputWithPast</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ht=new Qe({props:{anchor:"transformers.TFOPTForCausalLM.call.example",$$slots:{default:[Jl]},$$scope:{ctx:M}}}),Co=new it({}),Eo=new ue({props:{name:"class transformers.OPTForSequenceClassification",anchor:"transformers.OPTForSequenceClassification",parameters:[{name:"config",val:": OPTConfig"}],parametersDescription:[{anchor:"transformers.OPTForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig">OPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L998"}}),Ao=new ue({props:{name:"forward",anchor:"transformers.OPTForSequenceClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.Tensor]]] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.OPTForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/gpt2#transformers.GPT2Tokenizer">GPT2Tokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.OPTForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <p>Indices can be obtained using <code>OPTTokenizer</code>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p>If <code>past_key_values</code> is used, optionally only the last <code>decoder_input_ids</code> have to be input (see <code>past_key_values</code>).</p> <p>If you want to change padding behavior, you should read <code>modeling_opt._prepare_decoder_attention_mask</code> and modify to your needs. See diagram 1 in <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">the paper</a> for more information on the default strategy.`,name:"attention_mask"},{anchor:"transformers.OPTForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> of shape <code>(encoder_layers, encoder_attention_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.OPTForSequenceClassification.forward.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> <p>If <code>past_key_values</code> are used, the user can optionally input only the last <code>decoder_input_ids</code> (those that don&#x2019;t have their past key value states given to this model) of shape <code>(batch_size, 1)</code> instead of all <code>decoder_input_ids</code> of shape <code>(batch_size, sequence_length)</code>.`,name:"past_key_values"},{anchor:"transformers.OPTForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.OPTForSequenceClassification.forward.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If set to <code>True</code>, <code>past_key_values</code> key value states are returned and can be used to speed up decoding (see <code>past_key_values</code>).`,name:"use_cache"},{anchor:"transformers.OPTForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.OPTForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.OPTForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.OPTForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_opt.py#L1010",returnDescription:` <p>A <code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><code>transformers.modeling_outputs.SequenceClassifierOutputWithPast</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),Kt=new rs({props:{$$slots:{default:[Xl]},$$scope:{ctx:M}}}),Vt=new Qe({props:{anchor:"transformers.OPTForSequenceClassification.forward.example",$$slots:{default:[Yl]},$$scope:{ctx:M}}}),Zt=new Qe({props:{anchor:"transformers.OPTForSequenceClassification.forward.example-2",$$slots:{default:[Ql]},$$scope:{ctx:M}}}),Rt=new Qe({props:{anchor:"transformers.OPTForSequenceClassification.forward.example-3",$$slots:{default:[ed]},$$scope:{ctx:M}}}),Jt=new 
Qe({props:{anchor:"transformers.OPTForSequenceClassification.forward.example-4",$$slots:{default:[td]},$$scope:{ctx:M}}}),No=new it({}),Io=new ue({props:{name:"class transformers.FlaxOPTModel",anchor:"transformers.FlaxOPTModel",parameters:[{name:"config",val:": OPTConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_flax_opt.py#L691"}}),So=new ue({props:{name:"__call__",anchor:"transformers.FlaxOPTModel.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"deterministic",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_flax_opt.py#L583",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Qt=new Qe({props:{anchor:"transformers.FlaxOPTModel.__call__.example",$$slots:{default:[od]},$$scope:{ctx:M}}}),Do=new it({}),Wo=new ue({props:{name:"class 
transformers.FlaxOPTForCausalLM",anchor:"transformers.FlaxOPTForCausalLM",parameters:[{name:"config",val:": OPTConfig"},{name:"input_shape",val:": typing.Tuple[int] = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxOPTForCausalLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig">OPTConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"},{anchor:"transformers.FlaxOPTForCausalLM.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_flax_opt.py#L767"}}),Ro=new ue({props:{name:"__call__",anchor:"transformers.FlaxOPTForCausalLM.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"attention_mask",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"position_ids",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"params",val:": dict = None"},{name:"past_key_values",val:": dict = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"dropout_rng",val:": PRNGKey = None"},{name:"deterministic",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/opt/modeling_flax_opt.py#L583",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTConfig" >OPTConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> 
<li> <p><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_flax_outputs.FlaxBaseModelOutput" >transformers.modeling_flax_outputs.FlaxBaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),oo=new Qe({props:{anchor:"transformers.FlaxOPTForCausalLM.__call__.example",$$slots:{default:[nd]},$$scope:{ctx:M}}}),{c(){d=n("meta"),T=c(),h=n("h1"),f=n("a"),y=n("span"),b(l.$$.fragment),u=c(),E=n("span"),je=r("OPT"),Oe=c(),L=n("h2"),oe=n("a"),se=n("span"),b(F.$$.fragment),Le=c(),X=n("span"),Ae=r("Overview"),Fe=c(),W=n("p"),Ne=r("The OPT model was proposed in "),ae=n("a"),re=r("Open Pre-trained Transformer Language Models"),Ie=r(` by Meta AI. OPT is a series of open-sourced large causal language models which perform similar in performance to GPT3.`),Me=c(),V=n("p"),Se=r("The abstract from the paper is the following:"),xe=c(),Z=n("p"),Te=n("em"),De=r("Large language models, which are often trained for hundreds of thousands of compute days, have shown remarkable capabilities for zero- and few-shot learning. Given their computational cost, these models are difficult to replicate without significant capital. For the few that are available through APIs, no access is granted to the full model weights, making them difficult to study. We present Open Pre-trained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M to 175B parameters, which we aim to fully and responsibly share with interested researchers. We show that OPT-175B is comparable to GPT-3, while requiring only 1/7th the carbon footprint to develop. We are also releasing our logbook detailing the infrastructure challenges we faced, along with code for experimenting with all of the released models."),he=c(),C=n("p"),A=r("Tips:"),ze=c(),G=n("ul"),ie=n("li"),We=r("OPT has the same architecture as "),Y=n("code"),Ge=r("BartDecoder"),Be=r("."),N=c(),I=n("li"),le=r("Contrary to GPT2, OPT adds the EOS token "),ye=n("code"),He=r("</s>"),de=r(" to the beginning of every prompt. "),ve=n("strong"),Ue=r("Note"),Q=r(": Make sure to pass "),be=n("code"),ce=r("use_fast=False"),Ke=r(" when loading OPT\u2019s tokenizer with "),ne=n("a"),S=r("AutoTokenizer"),Ve=r(" to get the correct tokenizer."),B=c(),j=n("p"),Ze=r("This model was contributed by "),v=n("a"),x=r("Arthur Zucker"),lt=r(", "),ee=n("a"),dt=r("Younes Belkada"),ct=r(", and "),z=n("a"),pt=r("Patrick Von Platen"),ut=r(`. 
The original code can be found `),ke=n("a"),ht=r("here"),ft=r("."),R=c(),D=n("h2"),fe=n("a"),Re=n("span"),b(H.$$.fragment),mt=c(),Je=n("span"),Xe=r("OPTConfig"),ot=c(),q=n("div"),b(we.$$.fragment),Ks=c(),gt=n("p"),Vs=r("This is the configuration class to store the configuration of a "),tn=n("a"),Zs=r("OPTModel"),Rs=r(`. It is used to instantiate a OPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OPT `),lo=n("a"),Js=r("facebook/opt-350m"),Xs=r(" architecture."),Ys=c(),_t=n("p"),Qs=r("Configuration objects inherit from "),on=n("a"),ea=r("PretrainedConfig"),ta=r(` and can be used to control the model outputs. Read the documentation from `),nn=n("a"),oa=r("PretrainedConfig"),na=r(" for more information."),sa=c(),b(zt.$$.fragment),is=c(),Tt=n("h2"),Ct=n("a"),vn=n("span"),b(co.$$.fragment),aa=c(),bn=n("span"),ra=r("OPTModel"),ls=c(),$e=n("div"),b(po.$$.fragment),ia=c(),uo=n("p"),la=r(`The bare OPT Model outputting raw hidden-states without any specific head on top. This model inherits from `),sn=n("a"),da=r("PreTrainedModel"),ca=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),pa=c(),ho=n("p"),ua=r("This model is also a PyTorch "),fo=n("a"),ha=r("torch.nn.Module"),fa=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ma=c(),Ce=n("div"),b(mo.$$.fragment),ga=c(),yt=n("p"),_a=r("The "),an=n("a"),Ta=r("OPTModel"),ya=r(" forward method, overrides the "),kn=n("code"),va=r("__call__"),ba=r(" special method."),ka=c(),b(Et.$$.fragment),wa=c(),b(qt.$$.fragment),ds=c(),vt=n("h2"),jt=n("a"),wn=n("span"),b(go.$$.fragment),$a=c(),$n=n("span"),Pa=r("OPTForCausalLM"),cs=c(),bt=n("div"),b(_o.$$.fragment),Oa=c(),Lt=n("div"),b(To.$$.fragment),Fa=c(),b(At.$$.fragment),ps=c(),kt=n("h2"),Nt=n("a"),Pn=n("span"),b(yo.$$.fragment),Ma=c(),On=n("span"),xa=r("TFOPTModel"),us=c(),pe=n("div"),b(vo.$$.fragment),za=c(),bo=n("p"),Ca=r(`The bare TF OPT Model outputting raw hidden-states without any specific head on top. This model inherits from `),rn=n("a"),Ea=r("TFPreTrainedModel"),qa=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ja=c(),ko=n("p"),La=r("This model is also a "),wo=n("a"),Aa=r("tf.keras.Model"),Na=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Ia=c(),b(It.$$.fragment),Sa=c(),Ee=n("div"),b($o.$$.fragment),Da=c(),wt=n("p"),Wa=r("The "),ln=n("a"),Ga=r("TFOPTModel"),Ba=r(" forward method, overrides the "),Fn=n("code"),Ha=r("__call__"),Ua=r(" special method."),Ka=c(),b(St.$$.fragment),Va=c(),b(Dt.$$.fragment),hs=c(),$t=n("h2"),Wt=n("a"),Mn=n("span"),b(Po.$$.fragment),Za=c(),xn=n("span"),Ra=r("TFOPTForCausalLM"),fs=c(),te=n("div"),b(Oo.$$.fragment),Ja=c(),zn=n("p"),Xa=r("The OPT Model transformer with a language modeling head on top."),Ya=c(),Fo=n("p"),Qa=r("This model inherits from "),dn=n("a"),er=r("TFPreTrainedModel"),tr=r(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),or=c(),Mo=n("p"),nr=r("This model is also a "),xo=n("a"),sr=r("tf.keras.Model"),ar=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),rr=c(),b(Gt.$$.fragment),ir=c(),Bt=n("div"),b(zo.$$.fragment),lr=c(),b(Ht.$$.fragment),ms=c(),Pt=n("h2"),Ut=n("a"),Cn=n("span"),b(Co.$$.fragment),dr=c(),En=n("span"),cr=r("OPTForSequenceClassification"),gs=c(),U=n("div"),b(Eo.$$.fragment),pr=c(),qn=n("p"),ur=r("The OPT Model transformer with a sequence classification head on top (linear layer)."),hr=c(),cn=n("p"),pn=n("a"),fr=r("OPTForSequenceClassification"),mr=r(` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.`),gr=c(),Pe=n("p"),_r=r(`Since it does classification on the last token, it requires to know the position of the last token. If a `),jn=n("code"),Tr=r("pad_token_id"),yr=r(` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Ln=n("code"),vr=r("pad_token_id"),br=r(` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),An=n("code"),kr=r("inputs_embeds"),wr=r(" are passed instead of "),Nn=n("code"),$r=r("input_ids"),Pr=r(`, it does the same (take the last value in each row of the batch).`),Or=c(),qo=n("p"),Fr=r("This model inherits from "),un=n("a"),Mr=r("PreTrainedModel"),xr=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zr=c(),jo=n("p"),Cr=r("This model is also a PyTorch "),Lo=n("a"),Er=r("torch.nn.Module"),qr=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),jr=c(),J=n("div"),b(Ao.$$.fragment),Lr=c(),Ot=n("p"),Ar=r("The "),hn=n("a"),Nr=r("OPTForSequenceClassification"),Ir=r(" forward method, overrides the "),In=n("code"),Sr=r("__call__"),Dr=r(" special method."),Wr=c(),b(Kt.$$.fragment),Gr=c(),b(Vt.$$.fragment),Br=c(),b(Zt.$$.fragment),Hr=c(),b(Rt.$$.fragment),Ur=c(),b(Jt.$$.fragment),_s=c(),Ft=n("h2"),Xt=n("a"),Sn=n("span"),b(No.$$.fragment),Kr=c(),Dn=n("span"),Vr=r("FlaxOPTModel"),Ts=c(),Mt=n("div"),b(Io.$$.fragment),Zr=c(),Yt=n("div"),b(So.$$.fragment),Rr=c(),b(Qt.$$.fragment),ys=c(),xt=n("h2"),eo=n("a"),Wn=n("span"),b(Do.$$.fragment),Jr=c(),Gn=n("span"),Xr=r("FlaxOPTForCausalLM"),vs=c(),K=n("div"),b(Wo.$$.fragment),Yr=c(),Bn=n("p"),Qr=r(`OPT Model with a language modeling head on top (linear layer with weights tied to the input embeddings) e.g for autoregressive tasks.`),ei=c(),Go=n("p"),ti=r("This model inherits from "),fn=n("a"),oi=r("FlaxPreTrainedModel"),ni=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),si=c(),Bo=n("p"),ai=r(`This model is also a Flax Linen `),Ho=n("a"),ri=r("flax.nn.Module"),ii=r(` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),li=c(),Hn=n("p"),di=r("Finally, this model supports inherent JAX features such as:"),ci=c(),Ye=n("ul"),Un=n("li"),Uo=n("a"),pi=r("Just-In-Time (JIT) compilation"),ui=c(),Kn=n("li"),Ko=n("a"),hi=r("Automatic Differentiation"),fi=c(),Vn=n("li"),Vo=n("a"),mi=r("Vectorization"),gi=c(),Zn=n("li"),Zo=n("a"),_i=r("Parallelization"),Ti=c(),to=n("div"),b(Ro.$$.fragment),yi=c(),b(oo.$$.fragment),this.h()},l(o){const _=Dl('[data-svelte="svelte-1phssyn"]',document.head);d=s(_,"META",{name:!0,content:!0}),_.forEach(t),T=p(o),h=s(o,"H1",{class:!0});var Jo=a(h);f=s(Jo,"A",{id:!0,class:!0,href:!0});var Rn=a(f);y=s(Rn,"SPAN",{});var Jn=a(y);k(l.$$.fragment,Jn),Jn.forEach(t),Rn.forEach(t),u=p(Jo),E=s(Jo,"SPAN",{});var Xn=a(E);je=i(Xn,"OPT"),Xn.forEach(t),Jo.forEach(t),Oe=p(o),L=s(o,"H2",{class:!0});var Xo=a(L);oe=s(Xo,"A",{id:!0,class:!0,href:!0});var Yn=a(oe);se=s(Yn,"SPAN",{});var Qn=a(se);k(F.$$.fragment,Qn),Qn.forEach(t),Yn.forEach(t),Le=p(Xo),X=s(Xo,"SPAN",{});var es=a(X);Ae=i(es,"Overview"),es.forEach(t),Xo.forEach(t),Fe=p(o),W=s(o,"P",{});var Yo=a(W);Ne=i(Yo,"The OPT model was proposed in "),ae=s(Yo,"A",{href:!0,rel:!0});var ts=a(ae);re=i(ts,"Open Pre-trained Transformer Language Models"),ts.forEach(t),Ie=i(Yo,` by Meta AI. OPT is a series of open-sourced large causal language models which perform similar in performance to GPT3.`),Yo.forEach(t),Me=p(o),V=s(o,"P",{});var os=a(V);Se=i(os,"The abstract from the paper is the following:"),os.forEach(t),xe=p(o),Z=s(o,"P",{});var ns=a(Z);Te=s(ns,"EM",{});var ss=a(Te);De=i(ss,"Large language models, which are often trained for hundreds of thousands of compute days, have shown remarkable capabilities for zero- and few-shot learning. Given their computational cost, these models are difficult to replicate without significant capital. For the few that are available through APIs, no access is granted to the full model weights, making them difficult to study. We present Open Pre-trained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M to 175B parameters, which we aim to fully and responsibly share with interested researchers. We show that OPT-175B is comparable to GPT-3, while requiring only 1/7th the carbon footprint to develop. We are also releasing our logbook detailing the infrastructure challenges we faced, along with code for experimenting with all of the released models."),ss.forEach(t),ns.forEach(t),he=p(o),C=s(o,"P",{});var as=a(C);A=i(as,"Tips:"),as.forEach(t),ze=p(o),G=s(o,"UL",{});var Qo=a(G);ie=s(Qo,"LI",{});var en=a(ie);We=i(en,"OPT has the same architecture as "),Y=s(en,"CODE",{});var bi=a(Y);Ge=i(bi,"BartDecoder"),bi.forEach(t),Be=i(en,"."),en.forEach(t),N=p(Qo),I=s(Qo,"LI",{});var nt=a(I);le=i(nt,"Contrary to GPT2, OPT adds the EOS token "),ye=s(nt,"CODE",{});var ki=a(ye);He=i(ki,"</s>"),ki.forEach(t),de=i(nt," to the beginning of every prompt. 
"),ve=s(nt,"STRONG",{});var wi=a(ve);Ue=i(wi,"Note"),wi.forEach(t),Q=i(nt,": Make sure to pass "),be=s(nt,"CODE",{});var $i=a(be);ce=i($i,"use_fast=False"),$i.forEach(t),Ke=i(nt," when loading OPT\u2019s tokenizer with "),ne=s(nt,"A",{href:!0});var Pi=a(ne);S=i(Pi,"AutoTokenizer"),Pi.forEach(t),Ve=i(nt," to get the correct tokenizer."),nt.forEach(t),Qo.forEach(t),B=p(o),j=s(o,"P",{});var st=a(j);Ze=i(st,"This model was contributed by "),v=s(st,"A",{href:!0,rel:!0});var Oi=a(v);x=i(Oi,"Arthur Zucker"),Oi.forEach(t),lt=i(st,", "),ee=s(st,"A",{href:!0,rel:!0});var Fi=a(ee);dt=i(Fi,"Younes Belkada"),Fi.forEach(t),ct=i(st,", and "),z=s(st,"A",{href:!0,rel:!0});var Mi=a(z);pt=i(Mi,"Patrick Von Platen"),Mi.forEach(t),ut=i(st,`. The original code can be found `),ke=s(st,"A",{href:!0,rel:!0});var xi=a(ke);ht=i(xi,"here"),xi.forEach(t),ft=i(st,"."),st.forEach(t),R=p(o),D=s(o,"H2",{class:!0});var ks=a(D);fe=s(ks,"A",{id:!0,class:!0,href:!0});var zi=a(fe);Re=s(zi,"SPAN",{});var Ci=a(Re);k(H.$$.fragment,Ci),Ci.forEach(t),zi.forEach(t),mt=p(ks),Je=s(ks,"SPAN",{});var Ei=a(Je);Xe=i(Ei,"OPTConfig"),Ei.forEach(t),ks.forEach(t),ot=p(o),q=s(o,"DIV",{class:!0});var no=a(q);k(we.$$.fragment,no),Ks=p(no),gt=s(no,"P",{});var mn=a(gt);Vs=i(mn,"This is the configuration class to store the configuration of a "),tn=s(mn,"A",{href:!0});var qi=a(tn);Zs=i(qi,"OPTModel"),qi.forEach(t),Rs=i(mn,`. It is used to instantiate a OPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OPT `),lo=s(mn,"A",{href:!0,rel:!0});var ji=a(lo);Js=i(ji,"facebook/opt-350m"),ji.forEach(t),Xs=i(mn," architecture."),mn.forEach(t),Ys=p(no),_t=s(no,"P",{});var gn=a(_t);Qs=i(gn,"Configuration objects inherit from "),on=s(gn,"A",{href:!0});var Li=a(on);ea=i(Li,"PretrainedConfig"),Li.forEach(t),ta=i(gn,` and can be used to control the model outputs. Read the documentation from `),nn=s(gn,"A",{href:!0});var Ai=a(nn);oa=i(Ai,"PretrainedConfig"),Ai.forEach(t),na=i(gn," for more information."),gn.forEach(t),sa=p(no),k(zt.$$.fragment,no),no.forEach(t),is=p(o),Tt=s(o,"H2",{class:!0});var ws=a(Tt);Ct=s(ws,"A",{id:!0,class:!0,href:!0});var Ni=a(Ct);vn=s(Ni,"SPAN",{});var Ii=a(vn);k(co.$$.fragment,Ii),Ii.forEach(t),Ni.forEach(t),aa=p(ws),bn=s(ws,"SPAN",{});var Si=a(bn);ra=i(Si,"OPTModel"),Si.forEach(t),ws.forEach(t),ls=p(o),$e=s(o,"DIV",{class:!0});var so=a($e);k(po.$$.fragment,so),ia=p(so),uo=s(so,"P",{});var $s=a(uo);la=i($s,`The bare OPT Model outputting raw hidden-states without any specific head on top. This model inherits from `),sn=s($s,"A",{href:!0});var Di=a(sn);da=i(Di,"PreTrainedModel"),Di.forEach(t),ca=i($s,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),$s.forEach(t),pa=p(so),ho=s(so,"P",{});var Ps=a(ho);ua=i(Ps,"This model is also a PyTorch "),fo=s(Ps,"A",{href:!0,rel:!0});var Wi=a(fo);ha=i(Wi,"torch.nn.Module"),Wi.forEach(t),fa=i(Ps,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ps.forEach(t),ma=p(so),Ce=s(so,"DIV",{class:!0});var ao=a(Ce);k(mo.$$.fragment,ao),ga=p(ao),yt=s(ao,"P",{});var _n=a(yt);_a=i(_n,"The "),an=s(_n,"A",{href:!0});var Gi=a(an);Ta=i(Gi,"OPTModel"),Gi.forEach(t),ya=i(_n," forward method, overrides the "),kn=s(_n,"CODE",{});var Bi=a(kn);va=i(Bi,"__call__"),Bi.forEach(t),ba=i(_n," special method."),_n.forEach(t),ka=p(ao),k(Et.$$.fragment,ao),wa=p(ao),k(qt.$$.fragment,ao),ao.forEach(t),so.forEach(t),ds=p(o),vt=s(o,"H2",{class:!0});var Os=a(vt);jt=s(Os,"A",{id:!0,class:!0,href:!0});var Hi=a(jt);wn=s(Hi,"SPAN",{});var Ui=a(wn);k(go.$$.fragment,Ui),Ui.forEach(t),Hi.forEach(t),$a=p(Os),$n=s(Os,"SPAN",{});var Ki=a($n);Pa=i(Ki,"OPTForCausalLM"),Ki.forEach(t),Os.forEach(t),cs=p(o),bt=s(o,"DIV",{class:!0});var Fs=a(bt);k(_o.$$.fragment,Fs),Oa=p(Fs),Lt=s(Fs,"DIV",{class:!0});var Ms=a(Lt);k(To.$$.fragment,Ms),Fa=p(Ms),k(At.$$.fragment,Ms),Ms.forEach(t),Fs.forEach(t),ps=p(o),kt=s(o,"H2",{class:!0});var xs=a(kt);Nt=s(xs,"A",{id:!0,class:!0,href:!0});var Vi=a(Nt);Pn=s(Vi,"SPAN",{});var Zi=a(Pn);k(yo.$$.fragment,Zi),Zi.forEach(t),Vi.forEach(t),Ma=p(xs),On=s(xs,"SPAN",{});var Ri=a(On);xa=i(Ri,"TFOPTModel"),Ri.forEach(t),xs.forEach(t),us=p(o),pe=s(o,"DIV",{class:!0});var at=a(pe);k(vo.$$.fragment,at),za=p(at),bo=s(at,"P",{});var zs=a(bo);Ca=i(zs,`The bare TF OPT Model outputting raw hidden-states without any specific head on top. This model inherits from `),rn=s(zs,"A",{href:!0});var Ji=a(rn);Ea=i(Ji,"TFPreTrainedModel"),Ji.forEach(t),qa=i(zs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),zs.forEach(t),ja=p(at),ko=s(at,"P",{});var Cs=a(ko);La=i(Cs,"This model is also a "),wo=s(Cs,"A",{href:!0,rel:!0});var Xi=a(wo);Aa=i(Xi,"tf.keras.Model"),Xi.forEach(t),Na=i(Cs,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Cs.forEach(t),Ia=p(at),k(It.$$.fragment,at),Sa=p(at),Ee=s(at,"DIV",{class:!0});var ro=a(Ee);k($o.$$.fragment,ro),Da=p(ro),wt=s(ro,"P",{});var Tn=a(wt);Wa=i(Tn,"The "),ln=s(Tn,"A",{href:!0});var Yi=a(ln);Ga=i(Yi,"TFOPTModel"),Yi.forEach(t),Ba=i(Tn," forward method, overrides the "),Fn=s(Tn,"CODE",{});var Qi=a(Fn);Ha=i(Qi,"__call__"),Qi.forEach(t),Ua=i(Tn," special method."),Tn.forEach(t),Ka=p(ro),k(St.$$.fragment,ro),Va=p(ro),k(Dt.$$.fragment,ro),ro.forEach(t),at.forEach(t),hs=p(o),$t=s(o,"H2",{class:!0});var Es=a($t);Wt=s(Es,"A",{id:!0,class:!0,href:!0});var el=a(Wt);Mn=s(el,"SPAN",{});var tl=a(Mn);k(Po.$$.fragment,tl),tl.forEach(t),el.forEach(t),Za=p(Es),xn=s(Es,"SPAN",{});var ol=a(xn);Ra=i(ol,"TFOPTForCausalLM"),ol.forEach(t),Es.forEach(t),fs=p(o),te=s(o,"DIV",{class:!0});var qe=a(te);k(Oo.$$.fragment,qe),Ja=p(qe),zn=s(qe,"P",{});var nl=a(zn);Xa=i(nl,"The OPT Model transformer with a language modeling head on top."),nl.forEach(t),Ya=p(qe),Fo=s(qe,"P",{});var qs=a(Fo);Qa=i(qs,"This model inherits from "),dn=s(qs,"A",{href:!0});var sl=a(dn);er=i(sl,"TFPreTrainedModel"),sl.forEach(t),tr=i(qs,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qs.forEach(t),or=p(qe),Mo=s(qe,"P",{});var js=a(Mo);nr=i(js,"This model is also a "),xo=s(js,"A",{href:!0,rel:!0});var al=a(xo);sr=i(al,"tf.keras.Model"),al.forEach(t),ar=i(js,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),js.forEach(t),rr=p(qe),k(Gt.$$.fragment,qe),ir=p(qe),Bt=s(qe,"DIV",{class:!0});var Ls=a(Bt);k(zo.$$.fragment,Ls),lr=p(Ls),k(Ht.$$.fragment,Ls),Ls.forEach(t),qe.forEach(t),ms=p(o),Pt=s(o,"H2",{class:!0});var As=a(Pt);Ut=s(As,"A",{id:!0,class:!0,href:!0});var rl=a(Ut);Cn=s(rl,"SPAN",{});var il=a(Cn);k(Co.$$.fragment,il),il.forEach(t),rl.forEach(t),dr=p(As),En=s(As,"SPAN",{});var ll=a(En);cr=i(ll,"OPTForSequenceClassification"),ll.forEach(t),As.forEach(t),gs=p(o),U=s(o,"DIV",{class:!0});var me=a(U);k(Eo.$$.fragment,me),pr=p(me),qn=s(me,"P",{});var dl=a(qn);ur=i(dl,"The OPT Model transformer with a sequence classification head on top (linear layer)."),dl.forEach(t),hr=p(me),cn=s(me,"P",{});var vi=a(cn);pn=s(vi,"A",{href:!0});var cl=a(pn);fr=i(cl,"OPTForSequenceClassification"),cl.forEach(t),mr=i(vi,` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.`),vi.forEach(t),gr=p(me),Pe=s(me,"P",{});var rt=a(Pe);_r=i(rt,`Since it does classification on the last token, it requires to know the position of the last token. If a `),jn=s(rt,"CODE",{});var pl=a(jn);Tr=i(pl,"pad_token_id"),pl.forEach(t),yr=i(rt,` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `),Ln=s(rt,"CODE",{});var ul=a(Ln);vr=i(ul,"pad_token_id"),ul.forEach(t),br=i(rt,` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `),An=s(rt,"CODE",{});var hl=a(An);kr=i(hl,"inputs_embeds"),hl.forEach(t),wr=i(rt," are passed instead of "),Nn=s(rt,"CODE",{});var fl=a(Nn);$r=i(fl,"input_ids"),fl.forEach(t),Pr=i(rt,`, it does the same (take the last value in each row of the batch).`),rt.forEach(t),Or=p(me),qo=s(me,"P",{});var Ns=a(qo);Fr=i(Ns,"This model inherits from "),un=s(Ns,"A",{href:!0});var ml=a(un);Mr=i(ml,"PreTrainedModel"),ml.forEach(t),xr=i(Ns,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ns.forEach(t),zr=p(me),jo=s(me,"P",{});var Is=a(jo);Cr=i(Is,"This model is also a PyTorch "),Lo=s(Is,"A",{href:!0,rel:!0});var gl=a(Lo);Er=i(gl,"torch.nn.Module"),gl.forEach(t),qr=i(Is,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Is.forEach(t),jr=p(me),J=s(me,"DIV",{class:!0});var ge=a(J);k(Ao.$$.fragment,ge),Lr=p(ge),Ot=s(ge,"P",{});var yn=a(Ot);Ar=i(yn,"The "),hn=s(yn,"A",{href:!0});var _l=a(hn);Nr=i(_l,"OPTForSequenceClassification"),_l.forEach(t),Ir=i(yn," forward method, overrides the "),In=s(yn,"CODE",{});var Tl=a(In);Sr=i(Tl,"__call__"),Tl.forEach(t),Dr=i(yn," special method."),yn.forEach(t),Wr=p(ge),k(Kt.$$.fragment,ge),Gr=p(ge),k(Vt.$$.fragment,ge),Br=p(ge),k(Zt.$$.fragment,ge),Hr=p(ge),k(Rt.$$.fragment,ge),Ur=p(ge),k(Jt.$$.fragment,ge),ge.forEach(t),me.forEach(t),_s=p(o),Ft=s(o,"H2",{class:!0});var Ss=a(Ft);Xt=s(Ss,"A",{id:!0,class:!0,href:!0});var yl=a(Xt);Sn=s(yl,"SPAN",{});var vl=a(Sn);k(No.$$.fragment,vl),vl.forEach(t),yl.forEach(t),Kr=p(Ss),Dn=s(Ss,"SPAN",{});var bl=a(Dn);Vr=i(bl,"FlaxOPTModel"),bl.forEach(t),Ss.forEach(t),Ts=p(o),Mt=s(o,"DIV",{class:!0});var Ds=a(Mt);k(Io.$$.fragment,Ds),Zr=p(Ds),Yt=s(Ds,"DIV",{class:!0});var Ws=a(Yt);k(So.$$.fragment,Ws),Rr=p(Ws),k(Qt.$$.fragment,Ws),Ws.forEach(t),Ds.forEach(t),ys=p(o),xt=s(o,"H2",{class:!0});var Gs=a(xt);eo=s(Gs,"A",{id:!0,class:!0,href:!0});var kl=a(eo);Wn=s(kl,"SPAN",{});var wl=a(Wn);k(Do.$$.fragment,wl),wl.forEach(t),kl.forEach(t),Jr=p(Gs),Gn=s(Gs,"SPAN",{});var $l=a(Gn);Xr=i($l,"FlaxOPTForCausalLM"),$l.forEach(t),Gs.forEach(t),vs=p(o),K=s(o,"DIV",{class:!0});var _e=a(K);k(Wo.$$.fragment,_e),Yr=p(_e),Bn=s(_e,"P",{});var Pl=a(Bn);Qr=i(Pl,`OPT Model with a language modeling head on top (linear layer with weights tied to the input embeddings) e.g for autoregressive tasks.`),Pl.forEach(t),ei=p(_e),Go=s(_e,"P",{});var Bs=a(Go);ti=i(Bs,"This model inherits from "),fn=s(Bs,"A",{href:!0});var Ol=a(fn);oi=i(Ol,"FlaxPreTrainedModel"),Ol.forEach(t),ni=i(Bs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Bs.forEach(t),si=p(_e),Bo=s(_e,"P",{});var Hs=a(Bo);ai=i(Hs,`This model is also a Flax Linen `),Ho=s(Hs,"A",{href:!0,rel:!0});var Fl=a(Ho);ri=i(Fl,"flax.nn.Module"),Fl.forEach(t),ii=i(Hs,` subclass. 
Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.`),Hs.forEach(t),li=p(_e),Hn=s(_e,"P",{});var Ml=a(Hn);di=i(Ml,"Finally, this model supports inherent JAX features such as:"),Ml.forEach(t),ci=p(_e),Ye=s(_e,"UL",{});var io=a(Ye);Un=s(io,"LI",{});var xl=a(Un);Uo=s(xl,"A",{href:!0,rel:!0});var zl=a(Uo);pi=i(zl,"Just-In-Time (JIT) compilation"),zl.forEach(t),xl.forEach(t),ui=p(io),Kn=s(io,"LI",{});var Cl=a(Kn);Ko=s(Cl,"A",{href:!0,rel:!0});var El=a(Ko);hi=i(El,"Automatic Differentiation"),El.forEach(t),Cl.forEach(t),fi=p(io),Vn=s(io,"LI",{});var ql=a(Vn);Vo=s(ql,"A",{href:!0,rel:!0});var jl=a(Vo);mi=i(jl,"Vectorization"),jl.forEach(t),ql.forEach(t),gi=p(io),Zn=s(io,"LI",{});var Ll=a(Zn);Zo=s(Ll,"A",{href:!0,rel:!0});var Al=a(Zo);_i=i(Al,"Parallelization"),Al.forEach(t),Ll.forEach(t),io.forEach(t),Ti=p(_e),to=s(_e,"DIV",{class:!0});var Us=a(to);k(Ro.$$.fragment,Us),yi=p(Us),k(oo.$$.fragment,Us),Us.forEach(t),_e.forEach(t),this.h()},h(){m(d,"name","hf:doc:metadata"),m(d,"content",JSON.stringify(ad)),m(f,"id","opt"),m(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(f,"href","#opt"),m(h,"class","relative group"),m(oe,"id","overview"),m(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(oe,"href","#overview"),m(L,"class","relative group"),m(ae,"href","https://arxiv.org/pdf/2205.01068"),m(ae,"rel","nofollow"),m(ne,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoTokenizer"),m(v,"href","https://huggingface.co/ArthurZ"),m(v,"rel","nofollow"),m(ee,"href","https://huggingface.co/ybelkada"),m(ee,"rel","nofollow"),m(z,"href","https://huggingface.co/patrickvonplaten"),m(z,"rel","nofollow"),m(ke,"href","https://github.com/facebookresearch/metaseq"),m(ke,"rel","nofollow"),m(fe,"id","transformers.OPTConfig"),m(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(fe,"href","#transformers.OPTConfig"),m(D,"class","relative group"),m(tn,"href","/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTModel"),m(lo,"href","https://huggingface.co/facebook/opt-350m"),m(lo,"rel","nofollow"),m(on,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(nn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ct,"id","transformers.OPTModel"),m(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ct,"href","#transformers.OPTModel"),m(Tt,"class","relative group"),m(sn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(fo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(fo,"rel","nofollow"),m(an,"href","/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTModel"),m(Ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),m(jt,"id","transformers.OPTForCausalLM"),m(jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(jt,"href","#transformers.OPTForCausalLM"),m(vt,"class","relative group"),m(Lt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(bt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Nt,"id","transformers.TFOPTModel"),m(Nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Nt,"href","#transformers.TFOPTModel"),m(kt,"class","relative group"),m(rn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),m(wo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),m(wo,"rel","nofollow"),m(ln,"href","/docs/transformers/pr_19429/en/model_doc/opt#transformers.TFOPTModel"),m(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Wt,"id","transformers.TFOPTForCausalLM"),m(Wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Wt,"href","#transformers.TFOPTForCausalLM"),m($t,"class","relative group"),m(dn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),m(xo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),m(xo,"rel","nofollow"),m(Bt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ut,"id","transformers.OPTForSequenceClassification"),m(Ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ut,"href","#transformers.OPTForSequenceClassification"),m(Pt,"class","relative group"),m(pn,"href","/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTForSequenceClassification"),m(un,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(Lo,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(Lo,"rel","nofollow"),m(hn,"href","/docs/transformers/pr_19429/en/model_doc/opt#transformers.OPTForSequenceClassification"),m(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Xt,"id","transformers.FlaxOPTModel"),m(Xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Xt,"href","#transformers.FlaxOPTModel"),m(Ft,"class","relative group"),m(Yt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Mt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(eo,"id","transformers.FlaxOPTForCausalLM"),m(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),m(eo,"href","#transformers.FlaxOPTForCausalLM"),m(xt,"class","relative group"),m(fn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),m(Ho,"href","https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html"),m(Ho,"rel","nofollow"),m(Uo,"href","https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit"),m(Uo,"rel","nofollow"),m(Ko,"href","https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation"),m(Ko,"rel","nofollow"),m(Vo,"href","https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap"),m(Vo,"rel","nofollow"),m(Zo,"href","https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap"),m(Zo,"rel","nofollow"),m(to,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(o,_){e(document.head,d),g(o,T,_),g(o,h,_),e(h,f),e(f,y),w(l,y,null),e(h,u),e(h,E),e(E,je),g(o,Oe,_),g(o,L,_),e(L,oe),e(oe,se),w(F,se,null),e(L,Le),e(L,X),e(X,Ae),g(o,Fe,_),g(o,W,_),e(W,Ne),e(W,ae),e(ae,re),e(W,Ie),g(o,Me,_),g(o,V,_),e(V,Se),g(o,xe,_),g(o,Z,_),e(Z,Te),e(Te,De),g(o,he,_),g(o,C,_),e(C,A),g(o,ze,_),g(o,G,_),e(G,ie),e(ie,We),e(ie,Y),e(Y,Ge),e(ie,Be),e(G,N),e(G,I),e(I,le),e(I,ye),e(ye,He),e(I,de),e(I,ve),e(ve,Ue),e(I,Q),e(I,be),e(be,ce),e(I,Ke),e(I,ne),e(ne,S),e(I,Ve),g(o,B,_),g(o,j,_),e(j,Ze),e(j,v),e(v,x),e(j,lt),e(j,ee),e(ee,dt),e(j,ct),e(j,z),e(z,pt),e(j,ut),e(j,ke),e(ke,ht),e(j,ft),g(o,R,_),g(o,D,_),e(D,fe),e(fe,Re),w(H,Re,null),e(D,mt),e(D,Je),e(Je,Xe),g(o,ot,_),g(o,q,_),w(we,q,null),e(q,Ks),e(q,gt),e(gt,Vs),e(gt,tn),e(tn,Zs),e(gt,Rs),e(gt,lo),e(lo,Js),e(gt,Xs),e(q,Ys),e(q,_t),e(_t,Qs),e(_t,on),e(on,ea),e(_t,ta),e(_t,nn),e(nn,oa),e(_t,na),e(q,sa),w(zt,q,null),g(o,is,_),g(o,Tt,_),e(Tt,Ct),e(Ct,vn),w(co,vn,null),e(Tt,aa),e(Tt,bn),e(bn,ra),g(o,ls,_),g(o,$e,_),w(po,$e,null),e($e,ia),e($e,uo),e(uo,la),e(uo,sn),e(sn,da),e(uo,ca),e($e,pa),e($e,ho),e(ho,ua),e(ho,fo),e(fo,ha),e(ho,fa),e($e,ma),e($e,Ce),w(mo,Ce,null),e(Ce,ga),e(Ce,yt),e(yt,_a),e(yt,an),e(an,Ta),e(yt,ya),e(yt,kn),e(kn,va),e(yt,ba),e(Ce,ka),w(Et,Ce,null),e(Ce,wa),w(qt,Ce,null),g(o,ds,_),g(o,vt,_),e(vt,jt),e(jt,wn),w(go,wn,null),e(vt,$a),e(vt,$n),e($n,Pa),g(o,cs,_),g(o,bt,_),w(_o,bt,null),e(bt,Oa),e(bt,Lt),w(To,Lt,null),e(Lt,Fa),w(At,Lt,null),g(o,ps,_),g(o,kt,_),e(kt,Nt),e(Nt,Pn),w(yo,Pn,null),e(kt,Ma),e(kt,On),e(On,xa),g(o,us,_),g(o,pe,_),w(vo,pe,null),e(pe,za),e(pe,bo),e(bo,Ca),e(bo,rn),e(rn,Ea),e(bo,qa),e(pe,ja),e(pe,ko),e(ko,La),e(ko,wo),e(wo,Aa),e(ko,Na),e(pe,Ia),w(It,pe,null),e(pe,Sa),e(pe,Ee),w($o,Ee,null),e(Ee,Da),e(Ee,wt),e(wt,Wa),e(wt,ln),e(ln,Ga),e(wt,Ba),e(wt,Fn),e(Fn,Ha),e(wt,Ua),e(Ee,Ka),w(St,Ee,null),e(Ee,Va),w(Dt,Ee,null),g(o,hs,_),g(o,$t,_),e($t,Wt),e(Wt,Mn),w(Po,Mn,null),e($t,Za),e($t,xn),e(xn,Ra),g(o,fs,_),g(o,te,_),w(Oo,te,null),e(te,Ja),e(te,zn),e(zn,Xa),e(te,Ya),e(te,Fo),e(Fo,Qa),e(Fo,dn),e(dn,er),e(Fo,tr),e(te,or),e(te,Mo),e(Mo,nr),e(Mo,xo),e(xo,sr),e(Mo,ar),e(te,rr),w(Gt,te,null),e(te,ir),e(te,Bt),w(zo,Bt,null),e(Bt,lr),w(Ht,Bt,null),g(o,ms,_),g(o,Pt,_),e(Pt,Ut),e(Ut,Cn),w(Co,Cn,null),e(Pt,dr),e(Pt,En),e(En,cr),g(o,gs,_),g(o,U,_),w(Eo,U,null),e(U,pr),e(U,qn),e(qn,ur),e(U,hr),e(U,cn),e(cn,pn),e(pn,fr),e(cn,mr),e(U,gr),e(U,Pe),e(Pe,_r),e(Pe,jn),e(jn,Tr),e(Pe,yr),e(Pe,Ln),e(Ln,vr),e(Pe,br),e(Pe,An),e(An,kr),e(Pe,wr),e(Pe,Nn),e(Nn,$r),e(Pe,Pr),e(U,Or),e(U,qo),e(qo,Fr),e(qo,un),e(un,Mr),e(qo,xr),e(U,zr),e(U,jo),e(jo,Cr),e(jo,Lo),e(Lo,Er),e(jo
,qr),e(U,jr),e(U,J),w(Ao,J,null),e(J,Lr),e(J,Ot),e(Ot,Ar),e(Ot,hn),e(hn,Nr),e(Ot,Ir),e(Ot,In),e(In,Sr),e(Ot,Dr),e(J,Wr),w(Kt,J,null),e(J,Gr),w(Vt,J,null),e(J,Br),w(Zt,J,null),e(J,Hr),w(Rt,J,null),e(J,Ur),w(Jt,J,null),g(o,_s,_),g(o,Ft,_),e(Ft,Xt),e(Xt,Sn),w(No,Sn,null),e(Ft,Kr),e(Ft,Dn),e(Dn,Vr),g(o,Ts,_),g(o,Mt,_),w(Io,Mt,null),e(Mt,Zr),e(Mt,Yt),w(So,Yt,null),e(Yt,Rr),w(Qt,Yt,null),g(o,ys,_),g(o,xt,_),e(xt,eo),e(eo,Wn),w(Do,Wn,null),e(xt,Jr),e(xt,Gn),e(Gn,Xr),g(o,vs,_),g(o,K,_),w(Wo,K,null),e(K,Yr),e(K,Bn),e(Bn,Qr),e(K,ei),e(K,Go),e(Go,ti),e(Go,fn),e(fn,oi),e(Go,ni),e(K,si),e(K,Bo),e(Bo,ai),e(Bo,Ho),e(Ho,ri),e(Bo,ii),e(K,li),e(K,Hn),e(Hn,di),e(K,ci),e(K,Ye),e(Ye,Un),e(Un,Uo),e(Uo,pi),e(Ye,ui),e(Ye,Kn),e(Kn,Ko),e(Ko,hi),e(Ye,fi),e(Ye,Vn),e(Vn,Vo),e(Vo,mi),e(Ye,gi),e(Ye,Zn),e(Zn,Zo),e(Zo,_i),e(K,Ti),e(K,to),w(Ro,to,null),e(to,yi),w(oo,to,null),bs=!0},p(o,[_]){const Jo={};_&2&&(Jo.$$scope={dirty:_,ctx:o}),zt.$set(Jo);const Rn={};_&2&&(Rn.$$scope={dirty:_,ctx:o}),Et.$set(Rn);const Jn={};_&2&&(Jn.$$scope={dirty:_,ctx:o}),qt.$set(Jn);const Xn={};_&2&&(Xn.$$scope={dirty:_,ctx:o}),At.$set(Xn);const Xo={};_&2&&(Xo.$$scope={dirty:_,ctx:o}),It.$set(Xo);const Yn={};_&2&&(Yn.$$scope={dirty:_,ctx:o}),St.$set(Yn);const Qn={};_&2&&(Qn.$$scope={dirty:_,ctx:o}),Dt.$set(Qn);const es={};_&2&&(es.$$scope={dirty:_,ctx:o}),Gt.$set(es);const Yo={};_&2&&(Yo.$$scope={dirty:_,ctx:o}),Ht.$set(Yo);const ts={};_&2&&(ts.$$scope={dirty:_,ctx:o}),Kt.$set(ts);const os={};_&2&&(os.$$scope={dirty:_,ctx:o}),Vt.$set(os);const ns={};_&2&&(ns.$$scope={dirty:_,ctx:o}),Zt.$set(ns);const ss={};_&2&&(ss.$$scope={dirty:_,ctx:o}),Rt.$set(ss);const as={};_&2&&(as.$$scope={dirty:_,ctx:o}),Jt.$set(as);const Qo={};_&2&&(Qo.$$scope={dirty:_,ctx:o}),Qt.$set(Qo);const en={};_&2&&(en.$$scope={dirty:_,ctx:o}),oo.$set(en)},i(o){bs||($(l.$$.fragment,o),$(F.$$.fragment,o),$(H.$$.fragment,o),$(we.$$.fragment,o),$(zt.$$.fragment,o),$(co.$$.fragment,o),$(po.$$.fragment,o),$(mo.$$.fragment,o),$(Et.$$.fragment,o),$(qt.$$.fragment,o),$(go.$$.fragment,o),$(_o.$$.fragment,o),$(To.$$.fragment,o),$(At.$$.fragment,o),$(yo.$$.fragment,o),$(vo.$$.fragment,o),$(It.$$.fragment,o),$($o.$$.fragment,o),$(St.$$.fragment,o),$(Dt.$$.fragment,o),$(Po.$$.fragment,o),$(Oo.$$.fragment,o),$(Gt.$$.fragment,o),$(zo.$$.fragment,o),$(Ht.$$.fragment,o),$(Co.$$.fragment,o),$(Eo.$$.fragment,o),$(Ao.$$.fragment,o),$(Kt.$$.fragment,o),$(Vt.$$.fragment,o),$(Zt.$$.fragment,o),$(Rt.$$.fragment,o),$(Jt.$$.fragment,o),$(No.$$.fragment,o),$(Io.$$.fragment,o),$(So.$$.fragment,o),$(Qt.$$.fragment,o),$(Do.$$.fragment,o),$(Wo.$$.fragment,o),$(Ro.$$.fragment,o),$(oo.$$.fragment,o),bs=!0)},o(o){P(l.$$.fragment,o),P(F.$$.fragment,o),P(H.$$.fragment,o),P(we.$$.fragment,o),P(zt.$$.fragment,o),P(co.$$.fragment,o),P(po.$$.fragment,o),P(mo.$$.fragment,o),P(Et.$$.fragment,o),P(qt.$$.fragment,o),P(go.$$.fragment,o),P(_o.$$.fragment,o),P(To.$$.fragment,o),P(At.$$.fragment,o),P(yo.$$.fragment,o),P(vo.$$.fragment,o),P(It.$$.fragment,o),P($o.$$.fragment,o),P(St.$$.fragment,o),P(Dt.$$.fragment,o),P(Po.$$.fragment,o),P(Oo.$$.fragment,o),P(Gt.$$.fragment,o),P(zo.$$.fragment,o),P(Ht.$$.fragment,o),P(Co.$$.fragment,o),P(Eo.$$.fragment,o),P(Ao.$$.fragment,o),P(Kt.$$.fragment,o),P(Vt.$$.fragment,o),P(Zt.$$.fragment,o),P(Rt.$$.fragment,o),P(Jt.$$.fragment,o),P(No.$$.fragment,o),P(Io.$$.fragment,o),P(So.$$.fragment,o),P(Qt.$$.fragment,o),P(Do.$$.fragment,o),P(Wo.$$.fragment,o),P(Ro.$$.fragment,o),P(oo.$$.fragment,o),bs=!1},d(o){t(d),o&&t(T),o&&t(h),O(l),o&&t(Oe),o&&t(L),O(F),o&&t(Fe),o&&t(W),o&&t(Me),o&&t(V),o&&t(
xe),o&&t(Z),o&&t(he),o&&t(C),o&&t(ze),o&&t(G),o&&t(B),o&&t(j),o&&t(R),o&&t(D),O(H),o&&t(ot),o&&t(q),O(we),O(zt),o&&t(is),o&&t(Tt),O(co),o&&t(ls),o&&t($e),O(po),O(mo),O(Et),O(qt),o&&t(ds),o&&t(vt),O(go),o&&t(cs),o&&t(bt),O(_o),O(To),O(At),o&&t(ps),o&&t(kt),O(yo),o&&t(us),o&&t(pe),O(vo),O(It),O($o),O(St),O(Dt),o&&t(hs),o&&t($t),O(Po),o&&t(fs),o&&t(te),O(Oo),O(Gt),O(zo),O(Ht),o&&t(ms),o&&t(Pt),O(Co),o&&t(gs),o&&t(U),O(Eo),O(Ao),O(Kt),O(Vt),O(Zt),O(Rt),O(Jt),o&&t(_s),o&&t(Ft),O(No),o&&t(Ts),o&&t(Mt),O(Io),O(So),O(Qt),o&&t(ys),o&&t(xt),O(Do),o&&t(vs),o&&t(K),O(Wo),O(Ro),O(oo)}}}const ad={local:"opt",sections:[{local:"overview",title:"Overview"},{local:"transformers.OPTConfig",title:"OPTConfig"},{local:"transformers.OPTModel",title:"OPTModel"},{local:"transformers.OPTForCausalLM",title:"OPTForCausalLM"},{local:"transformers.TFOPTModel",title:"TFOPTModel"},{local:"transformers.TFOPTForCausalLM",title:"TFOPTForCausalLM"},{local:"transformers.OPTForSequenceClassification",title:"OPTForSequenceClassification"},{local:"transformers.FlaxOPTModel",title:"FlaxOPTModel"},{local:"transformers.FlaxOPTForCausalLM",title:"FlaxOPTForCausalLM"}],title:"OPT"};function rd(M){return Wl(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class hd extends Nl{constructor(d){super();Il(this,d,rd,sd,Sl,{})}}export{hd as default,ad as metadata};
5
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/vit_mae.mdx-hf-doc-builder.js
import{S as qs,i as Is,s as zs,e as a,k as p,w as b,t as r,M as Os,c as s,d as t,m,a as i,x as $,h as n,b as h,N as Ds,G as e,g,y as M,q as A,o as k,B as x,v as Ns,L as ur}from"../../chunks/vendor-hf-doc-builder.js";import{T as yo}from"../../chunks/Tip-hf-doc-builder.js";import{D as Tt}from"../../chunks/Docstring-hf-doc-builder.js";import{C as fr}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as bo}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as mr}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Ls(C){let d,w,u,f,T;return f=new fr({props:{code:`from transformers import ViTMAEModel, ViTMAEConfig # Initializing a ViT MAE vit-mae-base style configuration configuration = ViTMAEConfig() # Initializing a model from the vit-mae-base style configuration model = ViTMAEModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTMAEModel, ViTMAEConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a ViT MAE vit-mae-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = ViTMAEConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the vit-mae-base style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTMAEModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),{c(){d=a("p"),w=r("Example:"),u=p(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Example:"),c.forEach(t),u=m(l),$(f.$$.fragment,l)},m(l,c){g(l,d,c),e(d,w),g(l,u,c),M(f,l,c),T=!0},p:ur,i(l){T||(A(f.$$.fragment,l),T=!0)},o(l){k(f.$$.fragment,l),T=!1},d(l){l&&t(d),l&&t(u),x(f,l)}}}function Ss(C){let d,w,u,f,T;return{c(){d=a("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),f=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(c,"CODE",{});var F=i(u);f=n(F,"Module"),F.forEach(t),T=n(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(t)},m(l,c){g(l,d,c),e(d,w),e(d,u),e(u,f),e(d,T)},d(l){l&&t(d)}}}function Ks(C){let d,w,u,f,T;return f=new fr({props:{code:`from transformers import AutoFeatureExtractor, ViTMAEModel from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-base") model = ViTMAEModel.from_pretrained("facebook/vit-mae-base") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor, ViTMAEModel <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTMAEModel.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){d=a("p"),w=r("Examples:"),u=p(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Examples:"),c.forEach(t),u=m(l),$(f.$$.fragment,l)},m(l,c){g(l,d,c),e(d,w),g(l,u,c),M(f,l,c),T=!0},p:ur,i(l){T||(A(f.$$.fragment,l),T=!0)},o(l){k(f.$$.fragment,l),T=!1},d(l){l&&t(d),l&&t(u),x(f,l)}}}function Hs(C){let d,w,u,f,T;return{c(){d=a("p"),w=r("Note that we provide a script to pre-train this model on custom data in our "),u=a("a"),f=r(`examples directory`),T=r("."),this.h()},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Note that we provide a script to pre-train this model on custom data in our "),u=s(c,"A",{href:!0,rel:!0});var F=i(u);f=n(F,`examples directory`),F.forEach(t),T=n(c,"."),c.forEach(t),this.h()},h(){h(u,"href","https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining"),h(u,"rel","nofollow")},m(l,c){g(l,d,c),e(d,w),e(d,u),e(u,f),e(d,T)},d(l){l&&t(d)}}}function Us(C){let d,w,u,f,T;return{c(){d=a("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),f=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(c,"CODE",{});var F=i(u);f=n(F,"Module"),F.forEach(t),T=n(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(t)},m(l,c){g(l,d,c),e(d,w),e(d,u),e(u,f),e(d,T)},d(l){l&&t(d)}}}function Ws(C){let d,w,u,f,T;return f=new fr({props:{code:`from transformers import AutoFeatureExtractor, ViTMAEForPreTraining from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-base") model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) loss = outputs.loss mask = outputs.mask ids_restore = outputs.ids_restore`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
AutoFeatureExtractor, ViTMAEForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = ViTMAEForPreTraining.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>mask = outputs.mask <span class="hljs-meta">&gt;&gt;&gt; </span>ids_restore = outputs.ids_restore`}}),{c(){d=a("p"),w=r("Examples:"),u=p(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Examples:"),c.forEach(t),u=m(l),$(f.$$.fragment,l)},m(l,c){g(l,d,c),e(d,w),g(l,u,c),M(f,l,c),T=!0},p:ur,i(l){T||(A(f.$$.fragment,l),T=!0)},o(l){k(f.$$.fragment,l),T=!1},d(l){l&&t(d),l&&t(u),x(f,l)}}}function Rs(C){let d,w,u,f,T,l,c,F,Fe,Te,I,Y,Z,E,Ve,W,Pe,we,L,je,ee,te,Ce,Ee,K,qe,ye,H,fe,Ie,de,j,z,be,V,oe,ze,U,Oe,De,O,R,re,ce,Ne,ne,ae,Le,B,Se,D,Ke,J,N,He,q,Ue,We;return{c(){d=a("p"),w=r("TensorFlow models and layers in "),u=a("code"),f=r("transformers"),T=r(" accept two formats as input:"),l=p(),c=a("ul"),F=a("li"),Fe=r("having all inputs as keyword arguments (like PyTorch models), or"),Te=p(),I=a("li"),Y=r("having all inputs as a list, tuple or dict in the first positional argument."),Z=p(),E=a("p"),Ve=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),W=a("code"),Pe=r("model.fit()"),we=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=a("code"),je=r("model.fit()"),ee=r(` supports! 
If, however, you want to use the second format outside of Keras methods like `),te=a("code"),Ce=r("fit()"),Ee=r(" and "),K=a("code"),qe=r("predict()"),ye=r(`, such as when creating your own layers or models with the Keras `),H=a("code"),fe=r("Functional"),Ie=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),de=p(),j=a("ul"),z=a("li"),be=r("a single Tensor with "),V=a("code"),oe=r("pixel_values"),ze=r(" only and nothing else: "),U=a("code"),Oe=r("model(pixel_values)"),De=p(),O=a("li"),R=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=a("code"),ce=r("model([pixel_values, attention_mask])"),Ne=r(" or "),ne=a("code"),ae=r("model([pixel_values, attention_mask, token_type_ids])"),Le=p(),B=a("li"),Se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),D=a("code"),Ke=r('model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})'),J=p(),N=a("p"),He=r(`Note that when creating models and layers with `),q=a("a"),Ue=r("subclassing"),We=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(v){d=s(v,"P",{});var y=i(d);w=n(y,"TensorFlow models and layers in "),u=s(y,"CODE",{});var Qe=i(u);f=n(Qe,"transformers"),Qe.forEach(t),T=n(y," accept two formats as input:"),y.forEach(t),l=m(v),c=s(v,"UL",{});var Q=i(c);F=s(Q,"LI",{});var Ze=i(F);Fe=n(Ze,"having all inputs as keyword arguments (like PyTorch models), or"),Ze.forEach(t),Te=m(Q),I=s(Q,"LI",{});var et=i(I);Y=n(et,"having all inputs as a list, tuple or dict in the first positional argument."),et.forEach(t),Q.forEach(t),Z=m(v),E=s(v,"P",{});var P=i(E);Ve=n(P,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),W=s(P,"CODE",{});var ge=i(W);Pe=n(ge,"model.fit()"),ge.forEach(t),we=n(P,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=s(P,"CODE",{});var tt=i(L);je=n(tt,"model.fit()"),tt.forEach(t),ee=n(P,` supports! 
If, however, you want to use the second format outside of Keras methods like `),te=s(P,"CODE",{});var $e=i(te);Ce=n($e,"fit()"),$e.forEach(t),Ee=n(P," and "),K=s(P,"CODE",{});var ot=i(K);qe=n(ot,"predict()"),ot.forEach(t),ye=n(P,`, such as when creating your own layers or models with the Keras `),H=s(P,"CODE",{});var rt=i(H);fe=n(rt,"Functional"),rt.forEach(t),Ie=n(P,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),P.forEach(t),de=m(v),j=s(v,"UL",{});var G=i(j);z=s(G,"LI",{});var X=i(z);be=n(X,"a single Tensor with "),V=s(X,"CODE",{});var nt=i(V);oe=n(nt,"pixel_values"),nt.forEach(t),ze=n(X," only and nothing else: "),U=s(X,"CODE",{});var at=i(U);Oe=n(at,"model(pixel_values)"),at.forEach(t),X.forEach(t),De=m(G),O=s(G,"LI",{});var S=i(O);R=n(S,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=s(S,"CODE",{});var st=i(re);ce=n(st,"model([pixel_values, attention_mask])"),st.forEach(t),Ne=n(S," or "),ne=s(S,"CODE",{});var _e=i(ne);ae=n(_e,"model([pixel_values, attention_mask, token_type_ids])"),_e.forEach(t),S.forEach(t),Le=m(G),B=s(G,"LI",{});var Re=i(B);Se=n(Re,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),D=s(Re,"CODE",{});var it=i(D);Ke=n(it,'model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})'),it.forEach(t),Re.forEach(t),G.forEach(t),J=m(v),N=s(v,"P",{});var he=i(N);He=n(he,`Note that when creating models and layers with `),q=s(he,"A",{href:!0,rel:!0});var pe=i(q);Ue=n(pe,"subclassing"),pe.forEach(t),We=n(he,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),he.forEach(t),this.h()},h(){h(q,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),h(q,"rel","nofollow")},m(v,y){g(v,d,y),e(d,w),e(d,u),e(u,f),e(d,T),g(v,l,y),g(v,c,y),e(c,F),e(F,Fe),e(c,Te),e(c,I),e(I,Y),g(v,Z,y),g(v,E,y),e(E,Ve),e(E,W),e(W,Pe),e(E,we),e(E,L),e(L,je),e(E,ee),e(E,te),e(te,Ce),e(E,Ee),e(E,K),e(K,qe),e(E,ye),e(E,H),e(H,fe),e(E,Ie),g(v,de,y),g(v,j,y),e(j,z),e(z,be),e(z,V),e(V,oe),e(z,ze),e(z,U),e(U,Oe),e(j,De),e(j,O),e(O,R),e(O,re),e(re,ce),e(O,Ne),e(O,ne),e(ne,ae),e(j,Le),e(j,B),e(B,Se),e(B,D),e(D,Ke),g(v,J,y),g(v,N,y),e(N,He),e(N,q),e(q,Ue),e(N,We)},d(v){v&&t(d),v&&t(l),v&&t(c),v&&t(Z),v&&t(E),v&&t(de),v&&t(j),v&&t(J),v&&t(N)}}}function Bs(C){let d,w,u,f,T;return{c(){d=a("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),f=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(c,"CODE",{});var F=i(u);f=n(F,"Module"),F.forEach(t),T=n(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(t)},m(l,c){g(l,d,c),e(d,w),e(d,u),e(u,f),e(d,T)},d(l){l&&t(d)}}}function Gs(C){let d,w,u,f,T;return f=new fr({props:{code:`from transformers import AutoFeatureExtractor, TFViTMAEModel from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = 
AutoFeatureExtractor.from_pretrained("facebook/vit-mae-base") model = TFViTMAEModel.from_pretrained("facebook/vit-mae-base") inputs = feature_extractor(images=image, return_tensors="tf") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor, TFViTMAEModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFViTMAEModel.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){d=a("p"),w=r("Examples:"),u=p(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Examples:"),c.forEach(t),u=m(l),$(f.$$.fragment,l)},m(l,c){g(l,d,c),e(d,w),g(l,u,c),M(f,l,c),T=!0},p:ur,i(l){T||(A(f.$$.fragment,l),T=!0)},o(l){k(f.$$.fragment,l),T=!1},d(l){l&&t(d),l&&t(u),x(f,l)}}}function Xs(C){let d,w,u,f,T,l,c,F,Fe,Te,I,Y,Z,E,Ve,W,Pe,we,L,je,ee,te,Ce,Ee,K,qe,ye,H,fe,Ie,de,j,z,be,V,oe,ze,U,Oe,De,O,R,re,ce,Ne,ne,ae,Le,B,Se,D,Ke,J,N,He,q,Ue,We;return{c(){d=a("p"),w=r("TensorFlow models and layers in "),u=a("code"),f=r("transformers"),T=r(" accept two formats as input:"),l=p(),c=a("ul"),F=a("li"),Fe=r("having all inputs as keyword arguments (like PyTorch models), or"),Te=p(),I=a("li"),Y=r("having all inputs as a list, tuple or dict in the first positional argument."),Z=p(),E=a("p"),Ve=r(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),W=a("code"),Pe=r("model.fit()"),we=r(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=a("code"),je=r("model.fit()"),ee=r(` supports! 
If, however, you want to use the second format outside of Keras methods like `),te=a("code"),Ce=r("fit()"),Ee=r(" and "),K=a("code"),qe=r("predict()"),ye=r(`, such as when creating your own layers or models with the Keras `),H=a("code"),fe=r("Functional"),Ie=r(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),de=p(),j=a("ul"),z=a("li"),be=r("a single Tensor with "),V=a("code"),oe=r("pixel_values"),ze=r(" only and nothing else: "),U=a("code"),Oe=r("model(pixel_values)"),De=p(),O=a("li"),R=r(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=a("code"),ce=r("model([pixel_values, attention_mask])"),Ne=r(" or "),ne=a("code"),ae=r("model([pixel_values, attention_mask, token_type_ids])"),Le=p(),B=a("li"),Se=r(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),D=a("code"),Ke=r('model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})'),J=p(),N=a("p"),He=r(`Note that when creating models and layers with `),q=a("a"),Ue=r("subclassing"),We=r(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(v){d=s(v,"P",{});var y=i(d);w=n(y,"TensorFlow models and layers in "),u=s(y,"CODE",{});var Qe=i(u);f=n(Qe,"transformers"),Qe.forEach(t),T=n(y," accept two formats as input:"),y.forEach(t),l=m(v),c=s(v,"UL",{});var Q=i(c);F=s(Q,"LI",{});var Ze=i(F);Fe=n(Ze,"having all inputs as keyword arguments (like PyTorch models), or"),Ze.forEach(t),Te=m(Q),I=s(Q,"LI",{});var et=i(I);Y=n(et,"having all inputs as a list, tuple or dict in the first positional argument."),et.forEach(t),Q.forEach(t),Z=m(v),E=s(v,"P",{});var P=i(E);Ve=n(P,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),W=s(P,"CODE",{});var ge=i(W);Pe=n(ge,"model.fit()"),ge.forEach(t),we=n(P,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=s(P,"CODE",{});var tt=i(L);je=n(tt,"model.fit()"),tt.forEach(t),ee=n(P,` supports! 
If, however, you want to use the second format outside of Keras methods like `),te=s(P,"CODE",{});var $e=i(te);Ce=n($e,"fit()"),$e.forEach(t),Ee=n(P," and "),K=s(P,"CODE",{});var ot=i(K);qe=n(ot,"predict()"),ot.forEach(t),ye=n(P,`, such as when creating your own layers or models with the Keras `),H=s(P,"CODE",{});var rt=i(H);fe=n(rt,"Functional"),rt.forEach(t),Ie=n(P,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),P.forEach(t),de=m(v),j=s(v,"UL",{});var G=i(j);z=s(G,"LI",{});var X=i(z);be=n(X,"a single Tensor with "),V=s(X,"CODE",{});var nt=i(V);oe=n(nt,"pixel_values"),nt.forEach(t),ze=n(X," only and nothing else: "),U=s(X,"CODE",{});var at=i(U);Oe=n(at,"model(pixel_values)"),at.forEach(t),X.forEach(t),De=m(G),O=s(G,"LI",{});var S=i(O);R=n(S,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),re=s(S,"CODE",{});var st=i(re);ce=n(st,"model([pixel_values, attention_mask])"),st.forEach(t),Ne=n(S," or "),ne=s(S,"CODE",{});var _e=i(ne);ae=n(_e,"model([pixel_values, attention_mask, token_type_ids])"),_e.forEach(t),S.forEach(t),Le=m(G),B=s(G,"LI",{});var Re=i(B);Se=n(Re,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),D=s(Re,"CODE",{});var it=i(D);Ke=n(it,'model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})'),it.forEach(t),Re.forEach(t),G.forEach(t),J=m(v),N=s(v,"P",{});var he=i(N);He=n(he,`Note that when creating models and layers with `),q=s(he,"A",{href:!0,rel:!0});var pe=i(q);Ue=n(pe,"subclassing"),pe.forEach(t),We=n(he,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),he.forEach(t),this.h()},h(){h(q,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),h(q,"rel","nofollow")},m(v,y){g(v,d,y),e(d,w),e(d,u),e(u,f),e(d,T),g(v,l,y),g(v,c,y),e(c,F),e(F,Fe),e(c,Te),e(c,I),e(I,Y),g(v,Z,y),g(v,E,y),e(E,Ve),e(E,W),e(W,Pe),e(E,we),e(E,L),e(L,je),e(E,ee),e(E,te),e(te,Ce),e(E,Ee),e(E,K),e(K,qe),e(E,ye),e(E,H),e(H,fe),e(E,Ie),g(v,de,y),g(v,j,y),e(j,z),e(z,be),e(z,V),e(V,oe),e(z,ze),e(z,U),e(U,Oe),e(j,De),e(j,O),e(O,R),e(O,re),e(re,ce),e(O,Ne),e(O,ne),e(ne,ae),e(j,Le),e(j,B),e(B,Se),e(B,D),e(D,Ke),g(v,J,y),g(v,N,y),e(N,He),e(N,q),e(q,Ue),e(N,We)},d(v){v&&t(d),v&&t(l),v&&t(c),v&&t(Z),v&&t(E),v&&t(de),v&&t(j),v&&t(J),v&&t(N)}}}function Ys(C){let d,w,u,f,T;return{c(){d=a("p"),w=r("Although the recipe for forward pass needs to be defined within this function, one should call the "),u=a("code"),f=r("Module"),T=r(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Although the recipe for forward pass needs to be defined within this function, one should call the "),u=s(c,"CODE",{});var F=i(u);f=n(F,"Module"),F.forEach(t),T=n(c,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),c.forEach(t)},m(l,c){g(l,d,c),e(d,w),e(d,u),e(u,f),e(d,T)},d(l){l&&t(d)}}}function Js(C){let d,w,u,f,T;return f=new fr({props:{code:`from transformers import AutoFeatureExtractor, TFViTMAEForPreTraining from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = 
AutoFeatureExtractor.from_pretrained("facebook/vit-mae-base") model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) loss = outputs.loss mask = outputs.mask ids_restore = outputs.ids_restore`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor, TFViTMAEForPreTraining <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests <span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw) <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFViTMAEForPreTraining.from_pretrained(<span class="hljs-string">&quot;facebook/vit-mae-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(images=image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>mask = outputs.mask <span class="hljs-meta">&gt;&gt;&gt; </span>ids_restore = outputs.ids_restore`}}),{c(){d=a("p"),w=r("Examples:"),u=p(),b(f.$$.fragment)},l(l){d=s(l,"P",{});var c=i(d);w=n(c,"Examples:"),c.forEach(t),u=m(l),$(f.$$.fragment,l)},m(l,c){g(l,d,c),e(d,w),g(l,u,c),M(f,l,c),T=!0},p:ur,i(l){T||(A(f.$$.fragment,l),T=!0)},o(l){k(f.$$.fragment,l),T=!1},d(l){l&&t(d),l&&t(u),x(f,l)}}}function Qs(C){let d,w,u,f,T,l,c,F,Fe,Te,I,Y,Z,E,Ve,W,Pe,we,L,je,ee,te,Ce,Ee,K,qe,ye,H,fe,Ie,de,j,z,be,V,oe,ze,U,Oe,De,O,R,re,ce,Ne,ne,ae,Le,B,Se,D,Ke,J,N,He,q,Ue,We,v,y,Qe,Q,Ze,et,P,ge,tt,$e,ot,rt,G,X,nt,at,S,st,_e,Re,it,he,pe,Oa,gr,Kt,Ur,$o,Wr,_r,me,Rr,Ht,Br,Gr,Ut,Xr,Yr,Wt,Jr,Qr,Rt,Zr,en,vr,lt,wt,Ho,Bt,tn,Uo,on,Tr,ve,Gt,rn,dt,nn,Mo,an,sn,Xt,ln,dn,cn,ct,hn,Ao,pn,mn,ko,un,fn,gn,Et,wr,ht,yt,Wo,Yt,_n,Ro,vn,Er,Be,Jt,Tn,Qt,wn,Zt,En,yn,bn,Me,eo,$n,pt,Mn,xo,An,kn,Bo,xn,Fn,Vn,bt,Pn,$t,yr,mt,Mt,Go,to,jn,Xo,Cn,br,se,oo,qn,Yo,In,zn,At,On,ro,Dn,no,Nn,Ln,Sn,Ae,ao,Kn,ut,Hn,Fo,Un,Wn,Jo,Rn,Bn,Gn,kt,Xn,xt,$r,ft,Ft,Qo,so,Yn,Zo,Jn,Mr,ie,io,Qn,lo,Zn,Vo,ea,ta,oa,co,ra,ho,na,aa,sa,Vt,ia,ke,po,la,gt,da,Po,ca,ha,er,pa,ma,ua,Pt,fa,jt,Ar,_t,Ct,tr,mo,ga,or,_a,kr,le,uo,va,fo,Ta,jo,wa,Ea,ya,go,ba,_o,$a,Ma,Aa,qt,ka,xe,vo,xa,vt,Fa,Co,Va,Pa,rr,ja,Ca,qa,It,Ia,zt,xr;return l=new bo({}),E=new bo({}),Bt=new bo({}),Gt=new Tt({props:{name:"class transformers.ViTMAEConfig",anchor:"transformers.ViTMAEConfig",parameters:[{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.0"},{name:"attention_probs_dropout_prob",val:" = 0.0"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"is_encoder_decoder",val:" = False"},{name:"image_size",val:" = 224"},{name:"patch_size",val:" = 
16"},{name:"num_channels",val:" = 3"},{name:"qkv_bias",val:" = True"},{name:"decoder_num_attention_heads",val:" = 16"},{name:"decoder_hidden_size",val:" = 512"},{name:"decoder_num_hidden_layers",val:" = 8"},{name:"decoder_intermediate_size",val:" = 2048"},{name:"mask_ratio",val:" = 0.75"},{name:"norm_pix_loss",val:" = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ViTMAEConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.ViTMAEConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.ViTMAEConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.ViTMAEConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.ViTMAEConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.ViTMAEConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.ViTMAEConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.ViTMAEConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.ViTMAEConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.ViTMAEConfig.image_size",description:`<strong>image_size</strong> (<code>int</code>, <em>optional</em>, defaults to 224) &#x2014; The size (resolution) of each image.`,name:"image_size"},{anchor:"transformers.ViTMAEConfig.patch_size",description:`<strong>patch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; The size (resolution) of each patch.`,name:"patch_size"},{anchor:"transformers.ViTMAEConfig.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, 
defaults to 3) &#x2014; The number of input channels.`,name:"num_channels"},{anchor:"transformers.ViTMAEConfig.qkv_bias",description:`<strong>qkv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to add a bias to the queries, keys and values.`,name:"qkv_bias"},{anchor:"transformers.ViTMAEConfig.decoder_num_attention_heads",description:`<strong>decoder_num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the decoder.`,name:"decoder_num_attention_heads"},{anchor:"transformers.ViTMAEConfig.decoder_hidden_size",description:`<strong>decoder_hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; Dimensionality of the decoder.`,name:"decoder_hidden_size"},{anchor:"transformers.ViTMAEConfig.decoder_num_hidden_layers",description:`<strong>decoder_num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; Number of hidden layers in the decoder.`,name:"decoder_num_hidden_layers"},{anchor:"transformers.ViTMAEConfig.decoder_intermediate_size",description:`<strong>decoder_intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the decoder.`,name:"decoder_intermediate_size"},{anchor:"transformers.ViTMAEConfig.mask_ratio",description:`<strong>mask_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.75) &#x2014; The ratio of the number of masked tokens in the input sequence.`,name:"mask_ratio"},{anchor:"transformers.ViTMAEConfig.norm_pix_loss",description:`<strong>norm_pix_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to train with normalized pixels (see Table 3 in the paper). Using normalized pixels improved representation quality in the experiments of the authors.`,name:"norm_pix_loss"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/configuration_vit_mae.py#L29"}}),Et=new mr({props:{anchor:"transformers.ViTMAEConfig.example",$$slots:{default:[Ls]},$$scope:{ctx:C}}}),Yt=new bo({}),Jt=new Tt({props:{name:"class transformers.ViTMAEModel",anchor:"transformers.ViTMAEModel",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.ViTMAEModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig">ViTMAEConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_vit_mae.py#L639"}}),eo=new Tt({props:{name:"forward",anchor:"transformers.ViTMAEModel.forward",parameters:[{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"noise",val:": typing.Optional[torch.FloatTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.ViTMAEModel.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. See <code>AutoFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.ViTMAEModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ViTMAEModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ViTMAEModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ViTMAEModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_vit_mae.py#L663",returnDescription:` <p>A <code>transformers.models.vit_mae.modeling_vit_mae.ViTMAEModelOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig" >ViTMAEConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor indicating which patches are masked (1) and which are not (0).</li> <li><strong>ids_restore</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor containing the original index of the (shuffled) masked patches.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.vit_mae.modeling_vit_mae.ViTMAEModelOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),bt=new yo({props:{$$slots:{default:[Ss]},$$scope:{ctx:C}}}),$t=new mr({props:{anchor:"transformers.ViTMAEModel.forward.example",$$slots:{default:[Ks]},$$scope:{ctx:C}}}),to=new bo({}),oo=new Tt({props:{name:"class transformers.ViTMAEForPreTraining",anchor:"transformers.ViTMAEForPreTraining",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.ViTMAEForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig">ViTMAEConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_vit_mae.py#L851"}}),At=new yo({props:{$$slots:{default:[Hs]},$$scope:{ctx:C}}}),ao=new Tt({props:{name:"forward",anchor:"transformers.ViTMAEForPreTraining.forward",parameters:[{name:"pixel_values",val:": typing.Optional[torch.FloatTensor] = None"},{name:"noise",val:": typing.Optional[torch.FloatTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.ViTMAEForPreTraining.forward.pixel_values",description:`<strong>pixel_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. See <code>AutoFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.ViTMAEForPreTraining.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.ViTMAEForPreTraining.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.ViTMAEForPreTraining.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.ViTMAEForPreTraining.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_vit_mae.py#L964",returnDescription:` <p>A <code>transformers.models.vit_mae.modeling_vit_mae.ViTMAEForPreTrainingOutput</code> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig" >ViTMAEConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>) \u2014 Pixel reconstruction loss.</li> <li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, patch_size ** 2 * num_channels)</code>) \u2014 Pixel reconstruction logits.</li> <li><strong>mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor indicating which patches are masked (1) and which are not (0).</li> <li><strong>ids_restore</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor containing the original index of the (shuffled) masked patches.</li> <li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.vit_mae.modeling_vit_mae.ViTMAEForPreTrainingOutput</code> or <code>tuple(torch.FloatTensor)</code></p> `}}),kt=new yo({props:{$$slots:{default:[Us]},$$scope:{ctx:C}}}),xt=new mr({props:{anchor:"transformers.ViTMAEForPreTraining.forward.example",$$slots:{default:[Ws]},$$scope:{ctx:C}}}),so=new bo({}),io=new Tt({props:{name:"class transformers.TFViTMAEModel",anchor:"transformers.TFViTMAEModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFViTMAEModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig">ViTMAEConfig</a>) &#x2014; Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L805"}}),Vt=new yo({props:{$$slots:{default:[Rs]},$$scope:{ctx:C}}}),po=new Tt({props:{name:"call",anchor:"transformers.TFViTMAEModel.call",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"noise",val:": Tensor = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],parametersDescription:[{anchor:"transformers.TFViTMAEModel.call.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. See <code>AutoFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.TFViTMAEModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFViTMAEModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFViTMAEModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFViTMAEModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFViTMAEModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L814",returnDescription:` <p>A <code>transformers.models.vit_mae.modeling_tf_vit_mae.TFViTMAEModelOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig" >ViTMAEConfig</a>) and inputs.</p> <ul> <li><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</li> <li><strong>mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor indicating which patches are masked (1) and which are not (0).</li> <li><strong>ids_restore</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor containing the original index of the (shuffled) masked patches.</li> <li><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.vit_mae.modeling_tf_vit_mae.TFViTMAEModelOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),Pt=new yo({props:{$$slots:{default:[Bs]},$$scope:{ctx:C}}}),jt=new mr({props:{anchor:"transformers.TFViTMAEModel.call.example",$$slots:{default:[Gs]},$$scope:{ctx:C}}}),mo=new bo({}),uo=new Tt({props:{name:"class transformers.TFViTMAEForPreTraining",anchor:"transformers.TFViTMAEForPreTraining",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFViTMAEForPreTraining.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig">ViTMAEConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L978"}}),qt=new yo({props:{$$slots:{default:[Xs]},$$scope:{ctx:C}}}),vo=new Tt({props:{name:"call",anchor:"transformers.TFViTMAEForPreTraining.call",parameters:[{name:"pixel_values",val:": typing.Union[typing.List[tensorflow.python.framework.ops.Tensor], typing.List[numpy.ndarray], typing.List[tensorflow.python.keras.engine.keras_tensor.KerasTensor], typing.Dict[str, tensorflow.python.framework.ops.Tensor], typing.Dict[str, numpy.ndarray], typing.Dict[str, tensorflow.python.keras.engine.keras_tensor.KerasTensor], tensorflow.python.framework.ops.Tensor, numpy.ndarray, tensorflow.python.keras.engine.keras_tensor.KerasTensor, NoneType] = None"},{name:"noise",val:": Tensor = None"},{name:"head_mask",val:": typing.Union[numpy.ndarray, tensorflow.python.framework.ops.Tensor, NoneType] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],parametersDescription:[{anchor:"transformers.TFViTMAEForPreTraining.call.pixel_values",description:`<strong>pixel_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>(batch_size, num_channels, height, width)</code>) &#x2014; Pixel values. Pixel values can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoFeatureExtractor">AutoFeatureExtractor</a>. See <code>AutoFeatureExtractor.__call__()</code> for details.`,name:"pixel_values"},{anchor:"transformers.TFViTMAEForPreTraining.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFViTMAEForPreTraining.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFViTMAEForPreTraining.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFViTMAEForPreTraining.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFViTMAEForPreTraining.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L1105",returnDescription:` <p>A <code>transformers.models.vit_mae.modeling_tf_vit_mae.TFViTMAEForPreTrainingOutput</code> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEConfig" >ViTMAEConfig</a>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>) \u2014 Pixel reconstruction loss.</li> <li><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, patch_size ** 2 * num_channels)</code>) \u2014 Pixel reconstruction logits.</li> <li><strong>mask</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor indicating which patches are masked (1) and which are not (0).</li> <li><strong>ids_restore</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Tensor containing the original index of the (shuffled) masked patches.</li> <li><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li> <li><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li> </ul> `,returnType:` <p><code>transformers.models.vit_mae.modeling_tf_vit_mae.TFViTMAEForPreTrainingOutput</code> or <code>tuple(tf.Tensor)</code></p> `}}),It=new yo({props:{$$slots:{default:[Ys]},$$scope:{ctx:C}}}),zt=new mr({props:{anchor:"transformers.TFViTMAEForPreTraining.call.example",$$slots:{default:[Js]},$$scope:{ctx:C}}}),{c(){d=a("meta"),w=p(),u=a("h1"),f=a("a"),T=a("span"),b(l.$$.fragment),c=p(),F=a("span"),Fe=r("ViTMAE"),Te=p(),I=a("h2"),Y=a("a"),Z=a("span"),b(E.$$.fragment),Ve=p(),W=a("span"),Pe=r("Overview"),we=p(),L=a("p"),je=r("The ViTMAE model was proposed in "),ee=a("a"),te=r("Masked Autoencoders Are Scalable Vision Learners"),Ce=r(` by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick. The paper shows that, by pre-training a Vision Transformer (ViT) to reconstruct pixel values for masked patches, one can get results after fine-tuning that outperform supervised pre-training.`),Ee=p(),K=a("p"),qe=r("The abstract from the paper is the following:"),ye=p(),H=a("p"),fe=a("em"),Ie=r(`This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.`),de=p(),j=a("p"),z=r("Tips:"),be=p(),V=a("ul"),oe=a("li"),ze=r(`MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple: by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. One can use `),U=a("a"),Oe=r("ViTMAEForPreTraining"),De=r(" for this purpose."),O=p(),R=a("li"),re=r("An example Python script that illustrates how to pre-train "),ce=a("a"),Ne=r("ViTMAEForPreTraining"),ne=r(" from scratch can be found "),ae=a("a"),Le=r("here"),B=r(`. 
One can easily tweak it for their own use case.`),Se=p(),D=a("li"),Ke=r("A notebook that illustrates how to visualize reconstructed pixel values with "),J=a("a"),N=r("ViTMAEForPreTraining"),He=r(" can be found "),q=a("a"),Ue=r("here"),We=r("."),v=p(),y=a("li"),Qe=r(`After pre-training, one \u201Cthrows away\u201D the decoder used to reconstruct pixels, and one uses the encoder for fine-tuning/linear probing. This means that after fine-tuning, one can directly plug in the weights into a `),Q=a("a"),Ze=r("ViTForImageClassification"),et=r("."),P=p(),ge=a("li"),tt=r("One can use "),$e=a("a"),ot=r("ViTFeatureExtractor"),rt=r(" to prepare images for the model. See the code examples for more info."),G=p(),X=a("li"),nt=r(`Note that the encoder of MAE is only used to encode the visual patches. The encoded patches are then concatenated with mask tokens, which the decoder (which also consists of Transformer blocks) takes as input. Each mask token is a shared, learned vector that indicates the presence of a missing patch to be predicted. Fixed sin/cos position embeddings are added both to the input of the encoder and the decoder.`),at=p(),S=a("li"),st=r("For a visual understanding of how MAEs work you can check out this "),_e=a("a"),Re=r("post"),it=r("."),he=p(),pe=a("img"),gr=p(),Kt=a("small"),Ur=r("MAE architecture. Taken from the "),$o=a("a"),Wr=r("original paper."),_r=p(),me=a("p"),Rr=r("This model was contributed by "),Ht=a("a"),Br=r("nielsr"),Gr=r(". TensorFlow version of the model was contributed by "),Ut=a("a"),Xr=r("sayakpaul"),Yr=r(` and `),Wt=a("a"),Jr=r("ariG23498"),Qr=r(" (equal contribution). The original code can be found "),Rt=a("a"),Zr=r("here"),en=r("."),vr=p(),lt=a("h2"),wt=a("a"),Ho=a("span"),b(Bt.$$.fragment),tn=p(),Uo=a("span"),on=r("ViTMAEConfig"),Tr=p(),ve=a("div"),b(Gt.$$.fragment),rn=p(),dt=a("p"),nn=r("This is the configuration class to store the configuration of a "),Mo=a("a"),an=r("ViTMAEModel"),sn=r(`. It is used to instantiate an ViT MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViT `),Xt=a("a"),ln=r("facebook/vit-mae-base"),dn=r(" architecture."),cn=p(),ct=a("p"),hn=r("Configuration objects inherit from "),Ao=a("a"),pn=r("PretrainedConfig"),mn=r(` and can be used to control the model outputs. Read the documentation from `),ko=a("a"),un=r("PretrainedConfig"),fn=r(" for more information."),gn=p(),b(Et.$$.fragment),wr=p(),ht=a("h2"),yt=a("a"),Wo=a("span"),b(Yt.$$.fragment),_n=p(),Ro=a("span"),vn=r("ViTMAEModel"),Er=p(),Be=a("div"),b(Jt.$$.fragment),Tn=p(),Qt=a("p"),wn=r(`The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Zt=a("a"),En=r("torch.nn.Module"),yn=r(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),bn=p(),Me=a("div"),b(eo.$$.fragment),$n=p(),pt=a("p"),Mn=r("The "),xo=a("a"),An=r("ViTMAEModel"),kn=r(" forward method, overrides the "),Bo=a("code"),xn=r("__call__"),Fn=r(" special method."),Vn=p(),b(bt.$$.fragment),Pn=p(),b($t.$$.fragment),yr=p(),mt=a("h2"),Mt=a("a"),Go=a("span"),b(to.$$.fragment),jn=p(),Xo=a("span"),Cn=r("ViTMAEForPreTraining"),br=p(),se=a("div"),b(oo.$$.fragment),qn=p(),Yo=a("p"),In=r("The ViTMAE Model transformer with the decoder on top for self-supervised pre-training."),zn=p(),b(At.$$.fragment),On=p(),ro=a("p"),Dn=r("This model is a PyTorch "),no=a("a"),Nn=r("torch.nn.Module"),Ln=r(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sn=p(),Ae=a("div"),b(ao.$$.fragment),Kn=p(),ut=a("p"),Hn=r("The "),Fo=a("a"),Un=r("ViTMAEForPreTraining"),Wn=r(" forward method, overrides the "),Jo=a("code"),Rn=r("__call__"),Bn=r(" special method."),Gn=p(),b(kt.$$.fragment),Xn=p(),b(xt.$$.fragment),$r=p(),ft=a("h2"),Ft=a("a"),Qo=a("span"),b(so.$$.fragment),Yn=p(),Zo=a("span"),Jn=r("TFViTMAEModel"),Mr=p(),ie=a("div"),b(io.$$.fragment),Qn=p(),lo=a("p"),Zn=r(`The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Vo=a("a"),ea=r("TFPreTrainedModel"),ta=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),oa=p(),co=a("p"),ra=r("This model is also a "),ho=a("a"),na=r("tf.keras.Model"),aa=r(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sa=p(),b(Vt.$$.fragment),ia=p(),ke=a("div"),b(po.$$.fragment),la=p(),gt=a("p"),da=r("The "),Po=a("a"),ca=r("TFViTMAEModel"),ha=r(" forward method, overrides the "),er=a("code"),pa=r("__call__"),ma=r(" special method."),ua=p(),b(Pt.$$.fragment),fa=p(),b(jt.$$.fragment),Ar=p(),_t=a("h2"),Ct=a("a"),tr=a("span"),b(mo.$$.fragment),ga=p(),or=a("span"),_a=r("TFViTMAEForPreTraining"),kr=p(),le=a("div"),b(uo.$$.fragment),va=p(),fo=a("p"),Ta=r(`The ViTMAE Model transformer with the decoder on top for self-supervised pre-training. This model inherits from `),jo=a("a"),wa=r("TFPreTrainedModel"),Ea=r(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ya=p(),go=a("p"),ba=r("This model is also a "),_o=a("a"),$a=r("tf.keras.Model"),Ma=r(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Aa=p(),b(qt.$$.fragment),ka=p(),xe=a("div"),b(vo.$$.fragment),xa=p(),vt=a("p"),Fa=r("The "),Co=a("a"),Va=r("TFViTMAEForPreTraining"),Pa=r(" forward method, overrides the "),rr=a("code"),ja=r("__call__"),Ca=r(" special method."),qa=p(),b(It.$$.fragment),Ia=p(),b(zt.$$.fragment),this.h()},l(o){const _=Os('[data-svelte="svelte-1phssyn"]',document.head);d=s(_,"META",{name:!0,content:!0}),_.forEach(t),w=m(o),u=s(o,"H1",{class:!0});var To=i(u);f=s(To,"A",{id:!0,class:!0,href:!0});var nr=i(f);T=s(nr,"SPAN",{});var ar=i(T);$(l.$$.fragment,ar),ar.forEach(t),nr.forEach(t),c=m(To),F=s(To,"SPAN",{});var sr=i(F);Fe=n(sr,"ViTMAE"),sr.forEach(t),To.forEach(t),Te=m(o),I=s(o,"H2",{class:!0});var wo=i(I);Y=s(wo,"A",{id:!0,class:!0,href:!0});var ir=i(Y);Z=s(ir,"SPAN",{});var lr=i(Z);$(E.$$.fragment,lr),lr.forEach(t),ir.forEach(t),Ve=m(wo),W=s(wo,"SPAN",{});var dr=i(W);Pe=n(dr,"Overview"),dr.forEach(t),wo.forEach(t),we=m(o),L=s(o,"P",{});var Eo=i(L);je=n(Eo,"The ViTMAE model was proposed in "),ee=s(Eo,"A",{href:!0,rel:!0});var cr=i(ee);te=n(cr,"Masked Autoencoders Are Scalable Vision Learners"),cr.forEach(t),Ce=n(Eo,` by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick. The paper shows that, by pre-training a Vision Transformer (ViT) to reconstruct pixel values for masked patches, one can get results after fine-tuning that outperform supervised pre-training.`),Eo.forEach(t),Ee=m(o),K=s(o,"P",{});var hr=i(K);qe=n(hr,"The abstract from the paper is the following:"),hr.forEach(t),ye=m(o),H=s(o,"P",{});var pr=i(H);fe=s(pr,"EM",{});var Da=i(fe);Ie=n(Da,`This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.`),Da.forEach(t),pr.forEach(t),de=m(o),j=s(o,"P",{});var Na=i(j);z=n(Na,"Tips:"),Na.forEach(t),be=m(o),V=s(o,"UL",{});var ue=i(V);oe=s(ue,"LI",{});var Fr=i(oe);ze=n(Fr,`MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple: by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. 
One can use `),U=s(Fr,"A",{href:!0});var La=i(U);Oe=n(La,"ViTMAEForPreTraining"),La.forEach(t),De=n(Fr," for this purpose."),Fr.forEach(t),O=m(ue),R=s(ue,"LI",{});var qo=i(R);re=n(qo,"An example Python script that illustrates how to pre-train "),ce=s(qo,"A",{href:!0});var Sa=i(ce);Ne=n(Sa,"ViTMAEForPreTraining"),Sa.forEach(t),ne=n(qo," from scratch can be found "),ae=s(qo,"A",{href:!0,rel:!0});var Ka=i(ae);Le=n(Ka,"here"),Ka.forEach(t),B=n(qo,`. One can easily tweak it for their own use case.`),qo.forEach(t),Se=m(ue),D=s(ue,"LI",{});var Io=i(D);Ke=n(Io,"A notebook that illustrates how to visualize reconstructed pixel values with "),J=s(Io,"A",{href:!0});var Ha=i(J);N=n(Ha,"ViTMAEForPreTraining"),Ha.forEach(t),He=n(Io," can be found "),q=s(Io,"A",{href:!0,rel:!0});var Ua=i(q);Ue=n(Ua,"here"),Ua.forEach(t),We=n(Io,"."),Io.forEach(t),v=m(ue),y=s(ue,"LI",{});var Vr=i(y);Qe=n(Vr,`After pre-training, one \u201Cthrows away\u201D the decoder used to reconstruct pixels, and one uses the encoder for fine-tuning/linear probing. This means that after fine-tuning, one can directly plug in the weights into a `),Q=s(Vr,"A",{href:!0});var Wa=i(Q);Ze=n(Wa,"ViTForImageClassification"),Wa.forEach(t),et=n(Vr,"."),Vr.forEach(t),P=m(ue),ge=s(ue,"LI",{});var Pr=i(ge);tt=n(Pr,"One can use "),$e=s(Pr,"A",{href:!0});var Ra=i($e);ot=n(Ra,"ViTFeatureExtractor"),Ra.forEach(t),rt=n(Pr," to prepare images for the model. See the code examples for more info."),Pr.forEach(t),G=m(ue),X=s(ue,"LI",{});var Ba=i(X);nt=n(Ba,`Note that the encoder of MAE is only used to encode the visual patches. The encoded patches are then concatenated with mask tokens, which the decoder (which also consists of Transformer blocks) takes as input. Each mask token is a shared, learned vector that indicates the presence of a missing patch to be predicted. Fixed sin/cos position embeddings are added both to the input of the encoder and the decoder.`),Ba.forEach(t),at=m(ue),S=s(ue,"LI",{});var jr=i(S);st=n(jr,"For a visual understanding of how MAEs work you can check out this "),_e=s(jr,"A",{href:!0,rel:!0});var Ga=i(_e);Re=n(Ga,"post"),Ga.forEach(t),it=n(jr,"."),jr.forEach(t),ue.forEach(t),he=m(o),pe=s(o,"IMG",{src:!0,alt:!0,width:!0}),gr=m(o),Kt=s(o,"SMALL",{});var za=i(Kt);Ur=n(za,"MAE architecture. Taken from the "),$o=s(za,"A",{href:!0});var Xa=i($o);Wr=n(Xa,"original paper."),Xa.forEach(t),za.forEach(t),_r=m(o),me=s(o,"P",{});var Ge=i(me);Rr=n(Ge,"This model was contributed by "),Ht=s(Ge,"A",{href:!0,rel:!0});var Ya=i(Ht);Br=n(Ya,"nielsr"),Ya.forEach(t),Gr=n(Ge,". TensorFlow version of the model was contributed by "),Ut=s(Ge,"A",{href:!0,rel:!0});var Ja=i(Ut);Xr=n(Ja,"sayakpaul"),Ja.forEach(t),Yr=n(Ge,` and `),Wt=s(Ge,"A",{href:!0,rel:!0});var Qa=i(Wt);Jr=n(Qa,"ariG23498"),Qa.forEach(t),Qr=n(Ge," (equal contribution). The original code can be found "),Rt=s(Ge,"A",{href:!0,rel:!0});var Za=i(Rt);Zr=n(Za,"here"),Za.forEach(t),en=n(Ge,"."),Ge.forEach(t),vr=m(o),lt=s(o,"H2",{class:!0});var Cr=i(lt);wt=s(Cr,"A",{id:!0,class:!0,href:!0});var es=i(wt);Ho=s(es,"SPAN",{});var ts=i(Ho);$(Bt.$$.fragment,ts),ts.forEach(t),es.forEach(t),tn=m(Cr),Uo=s(Cr,"SPAN",{});var os=i(Uo);on=n(os,"ViTMAEConfig"),os.forEach(t),Cr.forEach(t),Tr=m(o),ve=s(o,"DIV",{class:!0});var Ot=i(ve);$(Gt.$$.fragment,Ot),rn=m(Ot),dt=s(Ot,"P",{});var zo=i(dt);nn=n(zo,"This is the configuration class to store the configuration of a "),Mo=s(zo,"A",{href:!0});var rs=i(Mo);an=n(rs,"ViTMAEModel"),rs.forEach(t),sn=n(zo,`. 
It is used to instantiate an ViT MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViT `),Xt=s(zo,"A",{href:!0,rel:!0});var ns=i(Xt);ln=n(ns,"facebook/vit-mae-base"),ns.forEach(t),dn=n(zo," architecture."),zo.forEach(t),cn=m(Ot),ct=s(Ot,"P",{});var Oo=i(ct);hn=n(Oo,"Configuration objects inherit from "),Ao=s(Oo,"A",{href:!0});var as=i(Ao);pn=n(as,"PretrainedConfig"),as.forEach(t),mn=n(Oo,` and can be used to control the model outputs. Read the documentation from `),ko=s(Oo,"A",{href:!0});var ss=i(ko);un=n(ss,"PretrainedConfig"),ss.forEach(t),fn=n(Oo," for more information."),Oo.forEach(t),gn=m(Ot),$(Et.$$.fragment,Ot),Ot.forEach(t),wr=m(o),ht=s(o,"H2",{class:!0});var qr=i(ht);yt=s(qr,"A",{id:!0,class:!0,href:!0});var is=i(yt);Wo=s(is,"SPAN",{});var ls=i(Wo);$(Yt.$$.fragment,ls),ls.forEach(t),is.forEach(t),_n=m(qr),Ro=s(qr,"SPAN",{});var ds=i(Ro);vn=n(ds,"ViTMAEModel"),ds.forEach(t),qr.forEach(t),Er=m(o),Be=s(o,"DIV",{class:!0});var Do=i(Be);$(Jt.$$.fragment,Do),Tn=m(Do),Qt=s(Do,"P",{});var Ir=i(Qt);wn=n(Ir,`The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top. This model is a PyTorch `),Zt=s(Ir,"A",{href:!0,rel:!0});var cs=i(Zt);En=n(cs,"torch.nn.Module"),cs.forEach(t),yn=n(Ir,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ir.forEach(t),bn=m(Do),Me=s(Do,"DIV",{class:!0});var Dt=i(Me);$(eo.$$.fragment,Dt),$n=m(Dt),pt=s(Dt,"P",{});var No=i(pt);Mn=n(No,"The "),xo=s(No,"A",{href:!0});var hs=i(xo);An=n(hs,"ViTMAEModel"),hs.forEach(t),kn=n(No," forward method, overrides the "),Bo=s(No,"CODE",{});var ps=i(Bo);xn=n(ps,"__call__"),ps.forEach(t),Fn=n(No," special method."),No.forEach(t),Vn=m(Dt),$(bt.$$.fragment,Dt),Pn=m(Dt),$($t.$$.fragment,Dt),Dt.forEach(t),Do.forEach(t),yr=m(o),mt=s(o,"H2",{class:!0});var zr=i(mt);Mt=s(zr,"A",{id:!0,class:!0,href:!0});var ms=i(Mt);Go=s(ms,"SPAN",{});var us=i(Go);$(to.$$.fragment,us),us.forEach(t),ms.forEach(t),jn=m(zr),Xo=s(zr,"SPAN",{});var fs=i(Xo);Cn=n(fs,"ViTMAEForPreTraining"),fs.forEach(t),zr.forEach(t),br=m(o),se=s(o,"DIV",{class:!0});var Xe=i(se);$(oo.$$.fragment,Xe),qn=m(Xe),Yo=s(Xe,"P",{});var gs=i(Yo);In=n(gs,"The ViTMAE Model transformer with the decoder on top for self-supervised pre-training."),gs.forEach(t),zn=m(Xe),$(At.$$.fragment,Xe),On=m(Xe),ro=s(Xe,"P",{});var Or=i(ro);Dn=n(Or,"This model is a PyTorch "),no=s(Or,"A",{href:!0,rel:!0});var _s=i(no);Nn=n(_s,"torch.nn.Module"),_s.forEach(t),Ln=n(Or,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Or.forEach(t),Sn=m(Xe),Ae=s(Xe,"DIV",{class:!0});var Nt=i(Ae);$(ao.$$.fragment,Nt),Kn=m(Nt),ut=s(Nt,"P",{});var Lo=i(ut);Hn=n(Lo,"The "),Fo=s(Lo,"A",{href:!0});var vs=i(Fo);Un=n(vs,"ViTMAEForPreTraining"),vs.forEach(t),Wn=n(Lo," forward method, overrides the "),Jo=s(Lo,"CODE",{});var Ts=i(Jo);Rn=n(Ts,"__call__"),Ts.forEach(t),Bn=n(Lo," special method."),Lo.forEach(t),Gn=m(Nt),$(kt.$$.fragment,Nt),Xn=m(Nt),$(xt.$$.fragment,Nt),Nt.forEach(t),Xe.forEach(t),$r=m(o),ft=s(o,"H2",{class:!0});var Dr=i(ft);Ft=s(Dr,"A",{id:!0,class:!0,href:!0});var ws=i(Ft);Qo=s(ws,"SPAN",{});var Es=i(Qo);$(so.$$.fragment,Es),Es.forEach(t),ws.forEach(t),Yn=m(Dr),Zo=s(Dr,"SPAN",{});var ys=i(Zo);Jn=n(ys,"TFViTMAEModel"),ys.forEach(t),Dr.forEach(t),Mr=m(o),ie=s(o,"DIV",{class:!0});var Ye=i(ie);$(io.$$.fragment,Ye),Qn=m(Ye),lo=s(Ye,"P",{});var Nr=i(lo);Zn=n(Nr,`The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top. This model inherits from `),Vo=s(Nr,"A",{href:!0});var bs=i(Vo);ea=n(bs,"TFPreTrainedModel"),bs.forEach(t),ta=n(Nr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Nr.forEach(t),oa=m(Ye),co=s(Ye,"P",{});var Lr=i(co);ra=n(Lr,"This model is also a "),ho=s(Lr,"A",{href:!0,rel:!0});var $s=i(ho);na=n($s,"tf.keras.Model"),$s.forEach(t),aa=n(Lr,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Lr.forEach(t),sa=m(Ye),$(Vt.$$.fragment,Ye),ia=m(Ye),ke=s(Ye,"DIV",{class:!0});var Lt=i(ke);$(po.$$.fragment,Lt),la=m(Lt),gt=s(Lt,"P",{});var So=i(gt);da=n(So,"The "),Po=s(So,"A",{href:!0});var Ms=i(Po);ca=n(Ms,"TFViTMAEModel"),Ms.forEach(t),ha=n(So," forward method, overrides the "),er=s(So,"CODE",{});var As=i(er);pa=n(As,"__call__"),As.forEach(t),ma=n(So," special method."),So.forEach(t),ua=m(Lt),$(Pt.$$.fragment,Lt),fa=m(Lt),$(jt.$$.fragment,Lt),Lt.forEach(t),Ye.forEach(t),Ar=m(o),_t=s(o,"H2",{class:!0});var Sr=i(_t);Ct=s(Sr,"A",{id:!0,class:!0,href:!0});var ks=i(Ct);tr=s(ks,"SPAN",{});var xs=i(tr);$(mo.$$.fragment,xs),xs.forEach(t),ks.forEach(t),ga=m(Sr),or=s(Sr,"SPAN",{});var Fs=i(or);_a=n(Fs,"TFViTMAEForPreTraining"),Fs.forEach(t),Sr.forEach(t),kr=m(o),le=s(o,"DIV",{class:!0});var Je=i(le);$(uo.$$.fragment,Je),va=m(Je),fo=s(Je,"P",{});var Kr=i(fo);Ta=n(Kr,`The ViTMAE Model transformer with the decoder on top for self-supervised pre-training. This model inherits from `),jo=s(Kr,"A",{href:!0});var Vs=i(jo);wa=n(Vs,"TFPreTrainedModel"),Vs.forEach(t),Ea=n(Kr,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Kr.forEach(t),ya=m(Je),go=s(Je,"P",{});var Hr=i(go);ba=n(Hr,"This model is also a "),_o=s(Hr,"A",{href:!0,rel:!0});var Ps=i(_o);$a=n(Ps,"tf.keras.Model"),Ps.forEach(t),Ma=n(Hr,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Hr.forEach(t),Aa=m(Je),$(qt.$$.fragment,Je),ka=m(Je),xe=s(Je,"DIV",{class:!0});var St=i(xe);$(vo.$$.fragment,St),xa=m(St),vt=s(St,"P",{});var Ko=i(vt);Fa=n(Ko,"The "),Co=s(Ko,"A",{href:!0});var js=i(Co);Va=n(js,"TFViTMAEForPreTraining"),js.forEach(t),Pa=n(Ko," forward method, overrides the "),rr=s(Ko,"CODE",{});var Cs=i(rr);ja=n(Cs,"__call__"),Cs.forEach(t),Ca=n(Ko," special method."),Ko.forEach(t),qa=m(St),$(It.$$.fragment,St),Ia=m(St),$(zt.$$.fragment,St),St.forEach(t),Je.forEach(t),this.h()},h(){h(d,"name","hf:doc:metadata"),h(d,"content",JSON.stringify(Zs)),h(f,"id","vitmae"),h(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(f,"href","#vitmae"),h(u,"class","relative group"),h(Y,"id","overview"),h(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Y,"href","#overview"),h(I,"class","relative group"),h(ee,"href","https://arxiv.org/abs/2111.06377v2"),h(ee,"rel","nofollow"),h(U,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEForPreTraining"),h(ce,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEForPreTraining"),h(ae,"href","https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining"),h(ae,"rel","nofollow"),h(J,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEForPreTraining"),h(q,"href","https://github.com/NielsRogge/Transformers-Tutorials/blob/master/ViTMAE/ViT_MAE_visualization_demo.ipynb"),h(q,"rel","nofollow"),h(Q,"href","/docs/transformers/pr_19429/en/model_doc/vit#transformers.ViTForImageClassification"),h($e,"href","/docs/transformers/pr_19429/en/model_doc/vit#transformers.ViTFeatureExtractor"),h(_e,"href","https://keras.io/examples/vision/masked_image_modeling/"),h(_e,"rel","nofollow"),Ds(pe.src,Oa="https://user-images.githubusercontent.com/11435359/146857310-f258c86c-fde6-48e8-9cee-badd2b21bd2c.png")||h(pe,"src",Oa),h(pe,"alt","drawing"),h(pe,"width","600"),h($o,"href","https://arxiv.org/abs/2111.06377"),h(Ht,"href","https://huggingface.co/nielsr"),h(Ht,"rel","nofollow"),h(Ut,"href","https://github.com/sayakpaul"),h(Ut,"rel","nofollow"),h(Wt,"href","https://github.com/ariG23498"),h(Wt,"rel","nofollow"),h(Rt,"href","https://github.com/facebookresearch/mae"),h(Rt,"rel","nofollow"),h(wt,"id","transformers.ViTMAEConfig"),h(wt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(wt,"href","#transformers.ViTMAEConfig"),h(lt,"class","relative group"),h(Mo,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEModel"),h(Xt,"href","https://huggingface.co/facebook/vit-mae-base"),h(Xt,"rel","nofollow"),h(Ao,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),h(ko,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),h(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(yt,"id","transformers.ViTMAEModel"),h(yt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(yt,"href","#transformers.ViTMAEModel"),h(ht,"class","relative group"),h(Zt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(Zt,"rel","nofollow"),h(xo,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEModel"),h(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(Mt,"id","transformers.ViTMAEForPreTraining"),h(Mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Mt,"href","#transformers.ViTMAEForPreTraining"),h(mt,"class","relative group"),h(no,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),h(no,"rel","nofollow"),h(Fo,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.ViTMAEForPreTraining"),h(Ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(Ft,"id","transformers.TFViTMAEModel"),h(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ft,"href","#transformers.TFViTMAEModel"),h(ft,"class","relative group"),h(Vo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),h(ho,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(ho,"rel","nofollow"),h(Po,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.TFViTMAEModel"),h(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(Ct,"id","transformers.TFViTMAEForPreTraining"),h(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ct,"href","#transformers.TFViTMAEForPreTraining"),h(_t,"class","relative group"),h(jo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),h(_o,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),h(_o,"rel","nofollow"),h(Co,"href","/docs/transformers/pr_19429/en/model_doc/vit_mae#transformers.TFViTMAEForPreTraining"),h(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(o,_){e(document.head,d),g(o,w,_),g(o,u,_),e(u,f),e(f,T),M(l,T,null),e(u,c),e(u,F),e(F,Fe),g(o,Te,_),g(o,I,_),e(I,Y),e(Y,Z),M(E,Z,null),e(I,Ve),e(I,W),e(W,Pe),g(o,we,_),g(o,L,_),e(L,je),e(L,ee),e(ee,te),e(L,Ce),g(o,Ee,_),g(o,K,_),e(K,qe),g(o,ye,_),g(o,H,_),e(H,fe),e(fe,Ie),g(o,de,_),g(o,j,_),e(j,z),g(o,be,_),g(o,V,_),e(V,oe),e(oe,ze),e(oe,U),e(U,Oe),e(oe,De),e(V,O),e(V,R),e(R,re),e(R,ce),e(ce,Ne),e(R,ne),e(R,ae),e(ae,Le),e(R,B),e(V,Se),e(V,D),e(D,Ke),e(D,J),e(J,N),e(D,He),e(D,q),e(q,Ue),e(D,We),e(V,v),e(V,y),e(y,Qe),e(y,Q),e(Q,Ze),e(y,et),e(V,P),e(V,ge),e(ge,tt),e(ge,$e),e($e,ot),e(ge,rt),e(V,G),e(V,X),e(X,nt),e(V,at),e(V,S),e(S,st),e(S,_e),e(_e,Re),e(S,it),g(o,he,_),g(o,pe,_),g(o,gr,_),g(o,Kt,_),e(Kt,Ur),e(Kt,$o),e($o,Wr),g(o,_r,_),g(o,me,_),e(me,Rr),e(me,Ht),e(Ht,Br),e(me,Gr),e(me,Ut),e(Ut,Xr),e(me,Yr),e(me,Wt),e(Wt,Jr),e(me,Qr),e(me,Rt),e(Rt,Zr),e(me,en),g(o,vr,_),g(o,lt,_),e(lt,wt),e(wt,Ho),M(Bt,Ho,null),e(lt,tn),e(lt,Uo),e(Uo,on),g(o,Tr,_),g(o,ve,_),M(Gt,ve,null),e(ve,rn),e(ve,dt),e(dt,nn),e(dt,Mo),e(Mo,an),e(dt,sn),e(dt,Xt),e(Xt,ln),e(dt,dn),e(ve,cn),e(ve,ct),e(ct,hn),e(ct,Ao),e(Ao,pn),e(ct,mn),e(ct,ko),e(ko,un),e(ct,fn),e(ve,gn),M(Et,ve,null),g(o,wr,_),g(o,ht,_),e(ht,yt),e(yt,Wo),M(Yt,Wo,null),e(ht,_n),e(ht,Ro),e(Ro,vn),g(o,Er,_),g(o,Be,_),M(Jt,Be,null),e(Be,Tn),e(Be,Qt),e(Qt,wn),e(Qt,Zt),e(Zt,En),e(Qt,yn),e(Be,bn),e(Be,Me),M(eo,Me,null),e(Me,$n),e(Me,pt),e(pt,Mn),e(pt,xo),e(xo,An),e(pt,kn),e(pt,Bo),e(Bo,xn),e(pt,Fn),e(Me,Vn),M(bt,Me,null),e(Me,Pn),M($t,Me,null),g(o,yr,_),g(o,mt,_),e(mt,Mt),e(Mt,Go),M(to,Go,null),e(mt,jn),e(mt,Xo),e(Xo,Cn),g(o,br,_),g(o,se,_),M(oo,se,null),e(se,qn),e(se,Yo),e(Yo,In),e(se,zn),M(At,se,null),e(se,On),e(se,ro),e(ro,Dn),e(ro,no),e(no,Nn),e(ro,Ln),e(se,Sn),e(se,Ae),M(ao,Ae,null),e(Ae,Kn),e(Ae,ut),e(ut,Hn),e(ut,Fo),e(Fo,Un),e(ut,Wn),e(ut,Jo),e(Jo,Rn),e(ut,Bn),e(Ae,Gn),M(kt,Ae,null),e(Ae,Xn),M(xt,Ae,null),g(o,$r,_),g(o,ft,_),e(ft,Ft),e(Ft,Qo),M(so,Qo,null),e(ft,Yn),e(ft,Zo),e(Zo,Jn),g(o,Mr,_),g(o,ie,_),M(io,ie,null),e(ie,Qn),e(ie,lo),e(lo,Zn),e(lo,Vo),e(Vo,ea),e(lo,ta),e(ie,oa),e(ie,co),e(co,ra),e(co,ho),e(ho,na),e(co,aa),e(ie,sa),M(Vt,ie,null),e(ie,ia),e(ie,ke),M(po,ke,null),e(ke,la),e(ke,gt),e(gt,da),e(gt,Po),e(Po,ca),e(gt,ha),e(gt,er),e(er,pa),e(gt,ma),e(ke,ua),M(Pt,ke,null),e(ke,fa),M(jt,ke,null),g(o,Ar,_),g(o,_t,_),e(_t,Ct),e(Ct,tr),M(mo,tr,null),e(_t,ga),e(_t,or),e(or,_a),g(o,kr,_),g(o,le,_),M(uo,le,null),e(le,va),e(le,fo),e(fo,Ta),e(fo,jo),e(jo,wa),e(fo,Ea),e(le,ya),e(le,go),e(go,ba),e(go,_o),e(_o,$a),e(go,Ma),e(le,Aa),M(qt,le,null),e(le,ka),e(le,xe),M(vo,xe,null),e(xe,xa),e(xe,vt),e(vt,Fa),e(vt,Co),e(Co,Va),e(vt,Pa),e(vt,rr),e(rr,ja),e(vt,Ca),e(xe,qa),M(It,xe,null),e(xe,Ia),M(zt,xe,null),xr=!0},p(o,[_]){const To={};_&2&&(To.$$scope={dirty:_,ctx:o}),Et.$set(To);const nr={};_&2&&(nr.$$scope={dirty:_,ctx:o}),bt.$set(nr);const ar={};_&2&&(ar.$$scope={dirty:_,ctx:o}),$t.$set(ar);const sr={};_&2&&(sr.$$scope={dirty:_,ctx:o}),At.$set(sr);const wo={};_&2&&(wo.$$scope={dirty:_,ctx:o}),kt.$set(wo);const ir={};_&2&&(ir.$$scope={dirty:_,ctx:o}),xt.$set(ir);const lr={};_&2&&(lr.$$scope={dirty:_,ctx:o}),Vt.$set(lr);const dr={};_&2&&(dr.$$scope={dirty:_,ctx:o}),Pt.$set(dr);const Eo={};_&2&&(Eo.$$scope={dirty:_,ctx:o}),jt.$set(Eo);const cr={};_&2&&(cr.$$scope={dirty:_,ctx:o}),qt.$set(cr);const hr={};_&2&&(hr.$$scope={dirty:_,ctx:o}),It.$set(hr);const 
pr={};_&2&&(pr.$$scope={dirty:_,ctx:o}),zt.$set(pr)},i(o){xr||(A(l.$$.fragment,o),A(E.$$.fragment,o),A(Bt.$$.fragment,o),A(Gt.$$.fragment,o),A(Et.$$.fragment,o),A(Yt.$$.fragment,o),A(Jt.$$.fragment,o),A(eo.$$.fragment,o),A(bt.$$.fragment,o),A($t.$$.fragment,o),A(to.$$.fragment,o),A(oo.$$.fragment,o),A(At.$$.fragment,o),A(ao.$$.fragment,o),A(kt.$$.fragment,o),A(xt.$$.fragment,o),A(so.$$.fragment,o),A(io.$$.fragment,o),A(Vt.$$.fragment,o),A(po.$$.fragment,o),A(Pt.$$.fragment,o),A(jt.$$.fragment,o),A(mo.$$.fragment,o),A(uo.$$.fragment,o),A(qt.$$.fragment,o),A(vo.$$.fragment,o),A(It.$$.fragment,o),A(zt.$$.fragment,o),xr=!0)},o(o){k(l.$$.fragment,o),k(E.$$.fragment,o),k(Bt.$$.fragment,o),k(Gt.$$.fragment,o),k(Et.$$.fragment,o),k(Yt.$$.fragment,o),k(Jt.$$.fragment,o),k(eo.$$.fragment,o),k(bt.$$.fragment,o),k($t.$$.fragment,o),k(to.$$.fragment,o),k(oo.$$.fragment,o),k(At.$$.fragment,o),k(ao.$$.fragment,o),k(kt.$$.fragment,o),k(xt.$$.fragment,o),k(so.$$.fragment,o),k(io.$$.fragment,o),k(Vt.$$.fragment,o),k(po.$$.fragment,o),k(Pt.$$.fragment,o),k(jt.$$.fragment,o),k(mo.$$.fragment,o),k(uo.$$.fragment,o),k(qt.$$.fragment,o),k(vo.$$.fragment,o),k(It.$$.fragment,o),k(zt.$$.fragment,o),xr=!1},d(o){t(d),o&&t(w),o&&t(u),x(l),o&&t(Te),o&&t(I),x(E),o&&t(we),o&&t(L),o&&t(Ee),o&&t(K),o&&t(ye),o&&t(H),o&&t(de),o&&t(j),o&&t(be),o&&t(V),o&&t(he),o&&t(pe),o&&t(gr),o&&t(Kt),o&&t(_r),o&&t(me),o&&t(vr),o&&t(lt),x(Bt),o&&t(Tr),o&&t(ve),x(Gt),x(Et),o&&t(wr),o&&t(ht),x(Yt),o&&t(Er),o&&t(Be),x(Jt),x(eo),x(bt),x($t),o&&t(yr),o&&t(mt),x(to),o&&t(br),o&&t(se),x(oo),x(At),x(ao),x(kt),x(xt),o&&t($r),o&&t(ft),x(so),o&&t(Mr),o&&t(ie),x(io),x(Vt),x(po),x(Pt),x(jt),o&&t(Ar),o&&t(_t),x(mo),o&&t(kr),o&&t(le),x(uo),x(qt),x(vo),x(It),x(zt)}}}const Zs={local:"vitmae",sections:[{local:"overview",title:"Overview"},{local:"transformers.ViTMAEConfig",title:"ViTMAEConfig"},{local:"transformers.ViTMAEModel",title:"ViTMAEModel"},{local:"transformers.ViTMAEForPreTraining",title:"ViTMAEForPreTraining"},{local:"transformers.TFViTMAEModel",title:"TFViTMAEModel"},{local:"transformers.TFViTMAEForPreTraining",title:"TFViTMAEForPreTraining"}],title:"ViTMAE"};function ei(C){return Ns(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ii extends qs{constructor(d){super();Is(this,d,ei,Qs,zs,{})}}export{ii as default,Zs as metadata};
6
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/ibert.mdx-hf-doc-builder.js
import{S as al,i as il,s as ll,e as r,k as u,w as v,t as l,M as dl,c as a,d as o,m as f,a as i,x as w,h as d,b as m,G as e,g as k,y as $,q as T,o as y,B as I,v as cl,L as Q}from"../../chunks/vendor-hf-doc-builder.js";import{T as vo}from"../../chunks/Tip-hf-doc-builder.js";import{D as O}from"../../chunks/Docstring-hf-doc-builder.js";import{C as H}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as ve}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as W}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function pl(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function hl(B){let s,g,c,p,b;return p=new H({props:{code:`from transformers import RobertaTokenizer, IBertModel import torch tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertModel.from_pretrained("kssteven/ibert-roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertModel.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>last_hidden_states = outputs.last_hidden_state`}}),{c(){s=r("p"),g=l("Example:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function ml(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of 
running the pre and post processing steps while the latter silently ignores them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function ul(B){let s,g,c,p,b;return p=new H({props:{code:`from transformers import RobertaTokenizer, IBertForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForMaskedLM.from_pretrained("kssteven/ibert-roberta-base") inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits # retrieve index of <mask> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) tokenizer.decode(predicted_token_id) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForMaskedLM.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;The capital of France is &lt;mask&gt;.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># retrieve index of &lt;mask&gt;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[<span class="hljs-number">0</span>].nonzero(as_tuple=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_token_id = logits[<span class="hljs-number">0</span>, mask_token_index].argmax(axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(predicted_token_id) `}}),{c(){s=r("p"),g=l("Example:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function fl(B){let s,g;return s=new H({props:{code:`labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] # mask labels of non-<mask> tokens labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) outputs = model(**inputs, labels=labels) round(outputs.loss.item(), 2) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = tokenizer(<span class="hljs-string">&quot;The capital of France is Paris.&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># mask labels of non-&lt;mask&gt; tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -<span class="hljs-number">100</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, labels=labels) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(outputs.loss.item(), <span class="hljs-number">2</span>) `}}),{c(){v(s.$$.fragment)},l(c){w(s.$$.fragment,c)},m(c,p){$(s,c,p),g=!0},p:Q,i(c){g||(T(s.$$.fragment,c),g=!0)},o(c){y(s.$$.fragment,c),g=!1},d(c){I(s,c)}}}function gl(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function _l(B){let s,g,c,p,b;return p=new H({props:{code:`import torch from transformers import RobertaTokenizer, IBertForSequenceClassification tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForSequenceClassification.from_pretrained("kssteven/ibert-roberta-base") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax().item() model.config.id2label[predicted_class_id] `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_id = logits.argmax().item() <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.id2label[predicted_class_id] `}}),{c(){s=r("p"),g=l("Example of single-label classification:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example of single-label classification:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function bl(B){let s,g;return s=new H({props:{code:`# To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\` num_labels = len(model.config.id2label) model = IBertForSequenceClassification.from_pretrained("kssteven/ibert-roberta-base", num_labels=num_labels) labels = torch.tensor([1]) loss = model(**inputs, labels=labels).loss round(loss.item(), 2) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\`</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(model.config.id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>, num_labels=num_labels) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor([<span class="hljs-number">1</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) `}}),{c(){v(s.$$.fragment)},l(c){w(s.$$.fragment,c)},m(c,p){$(s,c,p),g=!0},p:Q,i(c){g||(T(s.$$.fragment,c),g=!0)},o(c){y(s.$$.fragment,c),g=!1},d(c){I(s,c)}}}function kl(B){let s,g,c,p,b;return p=new H({props:{code:`import torch from transformers import RobertaTokenizer, IBertForSequenceClassification tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForSequenceClassification.from_pretrained("kssteven/ibert-roberta-base", problem_type="multi_label_classification") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax().item() model.config.id2label[predicted_class_id] `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span 
class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_id = logits.argmax().item() <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.id2label[predicted_class_id] `}}),{c(){s=r("p"),g=l("Example of multi-label classification:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example of multi-label classification:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function vl(B){let s,g;return s=new H({props:{code:`# To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\` num_labels = len(model.config.id2label) model = IBertForSequenceClassification.from_pretrained( "kssteven/ibert-roberta-base", num_labels=num_labels, problem_type="multi_label_classification" ) labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to( torch.float ) loss = model(**inputs, labels=labels).loss loss.backward()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># To train a model on \`num_labels\` classes, you can pass \`num_labels=num_labels\` to \`.from_pretrained(...)\`</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(model.config.id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForSequenceClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>, num_labels=num_labels, problem_type=<span class="hljs-string">&quot;multi_label_classification&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to( <span class="hljs-meta">... </span> torch.<span class="hljs-built_in">float</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span>loss.backward()`}}),{c(){v(s.$$.fragment)},l(c){w(s.$$.fragment,c)},m(c,p){$(s,c,p),g=!0},p:Q,i(c){g||(T(s.$$.fragment,c),g=!0)},o(c){y(s.$$.fragment,c),g=!1},d(c){I(s,c)}}}function wl(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function $l(B){let s,g,c,p,b;return p=new H({props:{code:`from transformers import RobertaTokenizer, IBertForMultipleChoice import torch tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForMultipleChoice.from_pretrained("kssteven/ibert-roberta-base") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." 
choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1 # the linear classifier still needs to be trained loss = outputs.loss logits = outputs.logits`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice0 = <span class="hljs-string">&quot;It is eaten with a fork and a knife.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>choice1 = <span class="hljs-string">&quot;It is eaten while held in the hand.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = torch.tensor(<span class="hljs-number">0</span>).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># choice0 is correct (according to Wikipedia ;)), batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, padding=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**{k: v.unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> encoding.items()}, labels=labels) <span class="hljs-comment"># batch size is 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># the linear classifier still needs to be trained</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits`}}),{c(){s=r("p"),g=l("Example:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function Tl(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores 
them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function yl(B){let s,g,c,p,b;return p=new H({props:{code:`from transformers import RobertaTokenizer, IBertForTokenClassification import torch tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForTokenClassification.from_pretrained("kssteven/ibert-roberta-base") inputs = tokenizer( "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ) with torch.no_grad(): logits = model(**inputs).logits predicted_token_class_ids = logits.argmax(-1) # Note that tokens are classified rather then input words which means that # there might be more predicted token classes than words. # Multiple token classes might account for the same word predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] predicted_tokens_classes `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForTokenClassification.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;HuggingFace is a company based in Paris and New York&quot;</span>, add_special_tokens=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_token_class_ids = logits.argmax(-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Note that tokens are classified rather then input words which means that</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># there might be more predicted token classes than words.</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Multiple token classes might account for the same word</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_tokens_classes = [model.config.id2label[t.item()] <span class="hljs-keyword">for</span> t <span class="hljs-keyword">in</span> predicted_token_class_ids[<span class="hljs-number">0</span>]] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_tokens_classes `}}),{c(){s=r("p"),g=l("Example:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function Il(B){let s,g;return s=new H({props:{code:`labels = predicted_token_class_ids loss = model(**inputs, labels=labels).loss round(loss.item(), 2) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = predicted_token_class_ids <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs, labels=labels).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) `}}),{c(){v(s.$$.fragment)},l(c){w(s.$$.fragment,c)},m(c,p){$(s,c,p),g=!0},p:Q,i(c){g||(T(s.$$.fragment,c),g=!0)},o(c){y(s.$$.fragment,c),g=!1},d(c){I(s,c)}}}function Bl(B){let s,g,c,p,b;return{c(){s=r("p"),g=l("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r("code"),p=l("Module"),b=l(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a(h,"CODE",{});var q=i(c);p=d(q,"Module"),q.forEach(o),b=d(h,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),h.forEach(o)},m(n,h){k(n,s,h),e(s,g),e(s,c),e(c,p),e(s,b)},d(n){n&&o(s)}}}function ql(B){let s,g,c,p,b;return p=new H({props:{code:`from transformers import RobertaTokenizer, IBertForQuestionAnswering import torch tokenizer = RobertaTokenizer.from_pretrained("kssteven/ibert-roberta-base") model = IBertForQuestionAnswering.from_pretrained("kssteven/ibert-roberta-base") question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" inputs = tokenizer(question, text, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) answer_start_index = outputs.start_logits.argmax() answer_end_index = outputs.end_logits.argmax() predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] tokenizer.decode(predict_answer_tokens) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> RobertaTokenizer, IBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = RobertaTokenizer.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = IBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;kssteven/ibert-roberta-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>question, text = <span class="hljs-string">&quot;Who was Jim Henson?&quot;</span>, <span class="hljs-string">&quot;Jim Henson was a nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(question, text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**inputs) <span class="hljs-meta">&gt;&gt;&gt; </span>answer_start_index = outputs.start_logits.argmax() <span class="hljs-meta">&gt;&gt;&gt; </span>answer_end_index = outputs.end_logits.argmax() <span class="hljs-meta">&gt;&gt;&gt; </span>predict_answer_tokens = inputs.input_ids[<span class="hljs-number">0</span>, answer_start_index : answer_end_index + <span class="hljs-number">1</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.decode(predict_answer_tokens) `}}),{c(){s=r("p"),g=l("Example:"),c=u(),v(p.$$.fragment)},l(n){s=a(n,"P",{});var h=i(s);g=d(h,"Example:"),h.forEach(o),c=f(n),w(p.$$.fragment,n)},m(n,h){k(n,s,h),e(s,g),k(n,c,h),$(p,n,h),b=!0},p:Q,i(n){b||(T(p.$$.fragment,n),b=!0)},o(n){y(p.$$.fragment,n),b=!1},d(n){n&&o(s),n&&o(c),I(p,n)}}}function Ml(B){let s,g;return s=new H({props:{code:`# target is "nice puppet" target_start_index = torch.tensor([14]) target_end_index = torch.tensor([15]) outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) loss = outputs.loss round(loss.item(), 2) `,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># target is &quot;nice puppet&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_start_index = torch.tensor([<span class="hljs-number">14</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>target_end_index = torch.tensor([<span class="hljs-number">15</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = outputs.loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) `}}),{c(){v(s.$$.fragment)},l(c){w(s.$$.fragment,c)},m(c,p){$(s,c,p),g=!0},p:Q,i(c){g||(T(s.$$.fragment,c),g=!0)},o(c){y(s.$$.fragment,c),g=!1},d(c){I(s,c)}}}function zl(B){let 
s,g,c,p,b,n,h,q,Yn,pn,oe,we,wo,Ke,Zn,$o,Xn,hn,$e,es,Ye,ts,os,mn,Ut,ns,un,Gt,To,ss,fn,G,rs,Ze,as,is,Xe,ls,ds,gn,ne,Te,yo,et,cs,Io,ps,_n,U,tt,hs,se,ms,Vt,us,fs,ot,gs,_s,bs,re,ks,Jt,vs,ws,Kt,$s,Ts,bn,ae,ye,Bo,nt,ys,qo,Is,kn,z,st,Bs,Mo,qs,Ms,rt,zs,Yt,Fs,xs,Es,at,Cs,it,js,Ps,As,lt,Ls,dt,Ss,Os,Rs,R,ct,Ns,ie,Ds,Zt,Ws,Qs,zo,Hs,Us,Gs,Ie,Vs,Be,vn,le,qe,Fo,pt,Js,xo,Ks,wn,F,ht,Ys,mt,Zs,Eo,Xs,er,tr,ut,or,Xt,nr,sr,rr,ft,ar,gt,ir,lr,dr,P,_t,cr,de,pr,eo,hr,mr,Co,ur,fr,gr,Me,_r,ze,br,Fe,$n,ce,xe,jo,bt,kr,Po,vr,Tn,x,kt,wr,Ao,$r,Tr,vt,yr,to,Ir,Br,qr,wt,Mr,$t,zr,Fr,xr,M,Tt,Er,pe,Cr,oo,jr,Pr,Lo,Ar,Lr,Sr,Ee,Or,Ce,Rr,je,Nr,Pe,Dr,Ae,yn,he,Le,So,yt,Wr,Oo,Qr,In,E,It,Hr,Ro,Ur,Gr,Bt,Vr,no,Jr,Kr,Yr,qt,Zr,Mt,Xr,ea,ta,N,zt,oa,me,na,so,sa,ra,No,aa,ia,la,Se,da,Oe,Bn,ue,Re,Do,Ft,ca,Wo,pa,qn,C,xt,ha,Qo,ma,ua,Et,fa,ro,ga,_a,ba,Ct,ka,jt,va,wa,$a,A,Pt,Ta,fe,ya,ao,Ia,Ba,Ho,qa,Ma,za,Ne,Fa,De,xa,We,Mn,ge,Qe,Uo,At,Ea,Go,Ca,zn,j,Lt,ja,_e,Pa,Vo,Aa,La,Jo,Sa,Oa,Ra,St,Na,io,Da,Wa,Qa,Ot,Ha,Rt,Ua,Ga,Va,L,Nt,Ja,be,Ka,lo,Ya,Za,Ko,Xa,ei,ti,He,oi,Ue,ni,Ge,Fn;return n=new ve({}),Ke=new ve({}),et=new ve({}),tt=new O({props:{name:"class transformers.IBertConfig",anchor:"transformers.IBertConfig",parameters:[{name:"vocab_size",val:" = 30522"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout_prob",val:" = 0.1"},{name:"attention_probs_dropout_prob",val:" = 0.1"},{name:"max_position_embeddings",val:" = 512"},{name:"type_vocab_size",val:" = 2"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-12"},{name:"pad_token_id",val:" = 1"},{name:"bos_token_id",val:" = 0"},{name:"eos_token_id",val:" = 2"},{name:"position_embedding_type",val:" = 'absolute'"},{name:"quant_mode",val:" = False"},{name:"force_dequant",val:" = 'none'"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.IBertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 30522) &#x2014; Vocabulary size of the I-BERT model. 
Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertModel">IBertModel</a>`,name:"vocab_size"},{anchor:"transformers.IBertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.IBertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.IBertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.IBertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (often named feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.IBertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>Callable</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;silu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.IBertConfig.hidden_dropout_prob",description:`<strong>hidden_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout_prob"},{anchor:"transformers.IBertConfig.attention_probs_dropout_prob",description:`<strong>attention_probs_dropout_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_probs_dropout_prob"},{anchor:"transformers.IBertConfig.max_position_embeddings",description:`<strong>max_position_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014; The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).`,name:"max_position_embeddings"},{anchor:"transformers.IBertConfig.type_vocab_size",description:`<strong>type_vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 2) &#x2014; The vocabulary size of the <code>token_type_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertModel">IBertModel</a>`,name:"type_vocab_size"},{anchor:"transformers.IBertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.IBertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.IBertConfig.position_embedding_type",description:`<strong>position_embedding_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;absolute&quot;</code>) &#x2014; Type of position embedding. Choose one of <code>&quot;absolute&quot;</code>, <code>&quot;relative_key&quot;</code>, <code>&quot;relative_key_query&quot;</code>. For positional embeddings use <code>&quot;absolute&quot;</code>. For more information on <code>&quot;relative_key&quot;</code>, please refer to <a href="https://arxiv.org/abs/1803.02155" rel="nofollow">Self-Attention with Relative Position Representations (Shaw et al.)</a>. For more information on <code>&quot;relative_key_query&quot;</code>, please refer to <em>Method 4</em> in <a href="https://arxiv.org/abs/2009.13658" rel="nofollow">Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)</a>.`,name:"position_embedding_type"},{anchor:"transformers.IBertConfig.quant_mode",description:`<strong>quant_mode</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to quantize the model or not.`,name:"quant_mode"},{anchor:"transformers.IBertConfig.force_dequant",description:`<strong>force_dequant</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; Force dequantize specific nonlinear layer. Dequatized layers are then executed with full precision. <code>&quot;none&quot;</code>, <code>&quot;gelu&quot;</code>, <code>&quot;softmax&quot;</code>, <code>&quot;layernorm&quot;</code> and <code>&quot;nonlinear&quot;</code> are supported. As deafult, it is set as <code>&quot;none&quot;</code>, which does not dequantize any layers. Please specify <code>&quot;gelu&quot;</code>, <code>&quot;softmax&quot;</code>, or <code>&quot;layernorm&quot;</code> to dequantize GELU, Softmax, or LayerNorm, respectively. 
<code>&quot;nonlinear&quot;</code> will dequantize all nonlinear layers, i.e., GELU, Softmax, and LayerNorm.`,name:"force_dequant"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/configuration_ibert.py#L38"}}),nt=new ve({}),st=new O({props:{name:"class transformers.IBertModel",anchor:"transformers.IBertModel",parameters:[{name:"config",val:""},{name:"add_pooling_layer",val:" = True"}],parametersDescription:[{anchor:"transformers.IBertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L734"}}),ct=new O({props:{name:"forward",anchor:"transformers.IBertModel.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertModel.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertModel.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertModel.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertModel.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertModel.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L773",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) \u2014 Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> <li> <p><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder\u2019s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</p> </li> <li> 
<p><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) \u2014 Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" >transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ie=new vo({props:{$$slots:{default:[pl]},$$scope:{ctx:B}}}),Be=new W({props:{anchor:"transformers.IBertModel.forward.example",$$slots:{default:[hl]},$$scope:{ctx:B}}}),pt=new ve({}),ht=new O({props:{name:"class transformers.IBertForMaskedLM",anchor:"transformers.IBertForMaskedLM",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.IBertForMaskedLM.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L858"}}),_t=new O({props:{name:"forward",anchor:"transformers.IBertForMaskedLM.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertForMaskedLM.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForMaskedLM.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForMaskedLM.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForMaskedLM.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForMaskedLM.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForMaskedLM.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForMaskedLM.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForMaskedLM.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForMaskedLM.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForMaskedLM.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_ids</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"},{anchor:"transformers.IBertForMaskedLM.forward.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, any]</code>, optional, defaults to <em>{}</em>) &#x2014; Used to hide legacy arguments that have been deprecated.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L877",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Masked language modeling (MLM) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions 
weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.MaskedLMOutput" >transformers.modeling_outputs.MaskedLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Me=new vo({props:{$$slots:{default:[ml]},$$scope:{ctx:B}}}),ze=new W({props:{anchor:"transformers.IBertForMaskedLM.forward.example",$$slots:{default:[ul]},$$scope:{ctx:B}}}),Fe=new W({props:{anchor:"transformers.IBertForMaskedLM.forward.example-2",$$slots:{default:[fl]},$$scope:{ctx:B}}}),bt=new ve({}),kt=new O({props:{name:"class transformers.IBertForSequenceClassification",anchor:"transformers.IBertForSequenceClassification",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.IBertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L973"}}),Tt=new O({props:{name:"forward",anchor:"transformers.IBertForSequenceClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertForSequenceClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForSequenceClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForSequenceClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForSequenceClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L986",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ee=new vo({props:{$$slots:{default:[gl]},$$scope:{ctx:B}}}),Ce=new 
W({props:{anchor:"transformers.IBertForSequenceClassification.forward.example",$$slots:{default:[_l]},$$scope:{ctx:B}}}),je=new W({props:{anchor:"transformers.IBertForSequenceClassification.forward.example-2",$$slots:{default:[bl]},$$scope:{ctx:B}}}),Pe=new W({props:{anchor:"transformers.IBertForSequenceClassification.forward.example-3",$$slots:{default:[kl]},$$scope:{ctx:B}}}),Ae=new W({props:{anchor:"transformers.IBertForSequenceClassification.forward.example-4",$$slots:{default:[vl]},$$scope:{ctx:B}}}),yt=new ve({}),It=new O({props:{name:"class transformers.IBertForMultipleChoice",anchor:"transformers.IBertForMultipleChoice",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.IBertForMultipleChoice.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1069"}}),zt=new O({props:{name:"forward",anchor:"transformers.IBertForMultipleChoice.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertForMultipleChoice.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForMultipleChoice.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, num_choices, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForMultipleChoice.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForMultipleChoice.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForMultipleChoice.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForMultipleChoice.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForMultipleChoice.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForMultipleChoice.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the multiple choice classification loss. Indices should be in <code>[0, ..., num_choices-1]</code> where <code>num_choices</code> is the size of the second dimension of the input tensors. (See <code>input_ids</code> above)`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1082",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) \u2014 <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.MultipleChoiceModelOutput" >transformers.modeling_outputs.MultipleChoiceModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Se=new vo({props:{$$slots:{default:[wl]},$$scope:{ctx:B}}}),Oe=new W({props:{anchor:"transformers.IBertForMultipleChoice.forward.example",$$slots:{default:[$l]},$$scope:{ctx:B}}}),Ft=new ve({}),xt=new O({props:{name:"class transformers.IBertForTokenClassification",anchor:"transformers.IBertForTokenClassification",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.IBertForTokenClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1162"}}),Pt=new O({props:{name:"forward",anchor:"transformers.IBertForTokenClassification.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"labels",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertForTokenClassification.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForTokenClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForTokenClassification.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForTokenClassification.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForTokenClassification.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForTokenClassification.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForTokenClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForTokenClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForTokenClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForTokenClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the token classification loss. 
Indices should be in <code>[0, ..., config.num_labels - 1]</code>.`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1177",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) \u2014 Classification scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.TokenClassifierOutput" >transformers.modeling_outputs.TokenClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ne=new vo({props:{$$slots:{default:[Tl]},$$scope:{ctx:B}}}),De=new W({props:{anchor:"transformers.IBertForTokenClassification.forward.example",$$slots:{default:[yl]},$$scope:{ctx:B}}}),We=new W({props:{anchor:"transformers.IBertForTokenClassification.forward.example-2",$$slots:{default:[Il]},$$scope:{ctx:B}}}),At=new ve({}),Lt=new O({props:{name:"class transformers.IBertForQuestionAnswering",anchor:"transformers.IBertForQuestionAnswering",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.IBertForQuestionAnswering.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig">IBertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1263"}}),Nt=new O({props:{name:"forward",anchor:"transformers.IBertForQuestionAnswering.forward",parameters:[{name:"input_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"attention_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"token_type_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"position_ids",val:": typing.Optional[torch.LongTensor] = None"},{name:"head_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"inputs_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.IBertForQuestionAnswering.forward.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/roberta#transformers.RobertaTokenizer">RobertaTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.IBertForQuestionAnswering.forward.token_type_ids",description:`<strong>token_type_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.position_ids",description:`<strong>position_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.IBertForQuestionAnswering.forward.head_mask",description:`<strong>head_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.IBertForQuestionAnswering.forward.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_ids</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_ids</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.IBertForQuestionAnswering.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.IBertForQuestionAnswering.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.IBertForQuestionAnswering.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.IBertForQuestionAnswering.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). Position outside of the sequence are not taken into account for computing the loss.`,name:"start_positions"},{anchor:"transformers.IBertForQuestionAnswering.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (<code>sequence_length</code>). 
Position outside of the sequence are not taken into account for computing the loss.`,name:"end_positions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/ibert/modeling_ibert.py#L1277",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertConfig" >IBertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.</p> </li> <li> <p><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-start scores (before SoftMax).</p> </li> <li> <p><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) \u2014 Span-end scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.QuestionAnsweringModelOutput" >transformers.modeling_outputs.QuestionAnsweringModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),He=new vo({props:{$$slots:{default:[Bl]},$$scope:{ctx:B}}}),Ue=new W({props:{anchor:"transformers.IBertForQuestionAnswering.forward.example",$$slots:{default:[ql]},$$scope:{ctx:B}}}),Ge=new W({props:{anchor:"transformers.IBertForQuestionAnswering.forward.example-2",$$slots:{default:[Ml]},$$scope:{ctx:B}}}),{c(){s=r("meta"),g=u(),c=r("h1"),p=r("a"),b=r("span"),v(n.$$.fragment),h=u(),q=r("span"),Yn=l("I-BERT"),pn=u(),oe=r("h2"),we=r("a"),wo=r("span"),v(Ke.$$.fragment),Zn=u(),$o=r("span"),Xn=l("Overview"),hn=u(),$e=r("p"),es=l("The I-BERT model was proposed in "),Ye=r("a"),ts=l("I-BERT: Integer-only BERT Quantization"),os=l(` by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. 
It\u2019s a quantized version of RoBERTa running inference up to four times faster.`),mn=u(),Ut=r("p"),ns=l("The abstract from the paper is the following:"),un=u(),Gt=r("p"),To=r("em"),ss=l(`Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.`),fn=u(),G=r("p"),rs=l("This model was contributed by "),Ze=r("a"),as=l("kssteven"),is=l(". The original code can be found "),Xe=r("a"),ls=l("here"),ds=l("."),gn=u(),ne=r("h2"),Te=r("a"),yo=r("span"),v(et.$$.fragment),cs=u(),Io=r("span"),ps=l("IBertConfig"),_n=u(),U=r("div"),v(tt.$$.fragment),hs=u(),se=r("p"),ms=l("This is the configuration class to store the configuration of a "),Vt=r("a"),us=l("IBertModel"),fs=l(`. It is used to instantiate a I-BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the IBERT `),ot=r("a"),gs=l("kssteven/ibert-roberta-base"),_s=l(" architecture."),bs=u(),re=r("p"),ks=l("Configuration objects inherit from "),Jt=r("a"),vs=l("PretrainedConfig"),ws=l(` and can be used to control the model outputs. Read the documentation from `),Kt=r("a"),$s=l("PretrainedConfig"),Ts=l(" for more information."),bn=u(),ae=r("h2"),ye=r("a"),Bo=r("span"),v(nt.$$.fragment),ys=u(),qo=r("span"),Is=l("IBertModel"),kn=u(),z=r("div"),v(st.$$.fragment),Bs=u(),Mo=r("p"),qs=l("The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top."),Ms=u(),rt=r("p"),zs=l("This model inherits from "),Yt=r("a"),Fs=l("PreTrainedModel"),xs=l(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Es=u(),at=r("p"),Cs=l("This model is also a PyTorch "),it=r("a"),js=l("torch.nn.Module"),Ps=l(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),As=u(),lt=r("p"),Ls=l(`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),dt=r("a"),Ss=l(`Attention is all you need`),Os=l(` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.`),Rs=u(),R=r("div"),v(ct.$$.fragment),Ns=u(),ie=r("p"),Ds=l("The "),Zt=r("a"),Ws=l("IBertModel"),Qs=l(" forward method, overrides the "),zo=r("code"),Hs=l("__call__"),Us=l(" special method."),Gs=u(),v(Ie.$$.fragment),Vs=u(),v(Be.$$.fragment),vn=u(),le=r("h2"),qe=r("a"),Fo=r("span"),v(pt.$$.fragment),Js=u(),xo=r("span"),Ks=l("IBertForMaskedLM"),wn=u(),F=r("div"),v(ht.$$.fragment),Ys=u(),mt=r("p"),Zs=l("I-BERT Model with a "),Eo=r("code"),Xs=l("language modeling"),er=l(" head on top."),tr=u(),ut=r("p"),or=l("This model inherits from "),Xt=r("a"),nr=l("PreTrainedModel"),sr=l(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rr=u(),ft=r("p"),ar=l("This model is also a PyTorch "),gt=r("a"),ir=l("torch.nn.Module"),lr=l(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),dr=u(),P=r("div"),v(_t.$$.fragment),cr=u(),de=r("p"),pr=l("The "),eo=r("a"),hr=l("IBertForMaskedLM"),mr=l(" forward method, overrides the "),Co=r("code"),ur=l("__call__"),fr=l(" special method."),gr=u(),v(Me.$$.fragment),_r=u(),v(ze.$$.fragment),br=u(),v(Fe.$$.fragment),$n=u(),ce=r("h2"),xe=r("a"),jo=r("span"),v(bt.$$.fragment),kr=u(),Po=r("span"),vr=l("IBertForSequenceClassification"),Tn=u(),x=r("div"),v(kt.$$.fragment),wr=u(),Ao=r("p"),$r=l(`I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Tr=u(),vt=r("p"),yr=l("This model inherits from "),to=r("a"),Ir=l("PreTrainedModel"),Br=l(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),qr=u(),wt=r("p"),Mr=l("This model is also a PyTorch "),$t=r("a"),zr=l("torch.nn.Module"),Fr=l(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),xr=u(),M=r("div"),v(Tt.$$.fragment),Er=u(),pe=r("p"),Cr=l("The "),oo=r("a"),jr=l("IBertForSequenceClassification"),Pr=l(" forward method, overrides the "),Lo=r("code"),Ar=l("__call__"),Lr=l(" special method."),Sr=u(),v(Ee.$$.fragment),Or=u(),v(Ce.$$.fragment),Rr=u(),v(je.$$.fragment),Nr=u(),v(Pe.$$.fragment),Dr=u(),v(Ae.$$.fragment),yn=u(),he=r("h2"),Le=r("a"),So=r("span"),v(yt.$$.fragment),Wr=u(),Oo=r("span"),Qr=l("IBertForMultipleChoice"),In=u(),E=r("div"),v(It.$$.fragment),Hr=u(),Ro=r("p"),Ur=l(`I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Gr=u(),Bt=r("p"),Vr=l("This model inherits from "),no=r("a"),Jr=l("PreTrainedModel"),Kr=l(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Yr=u(),qt=r("p"),Zr=l("This model is also a PyTorch "),Mt=r("a"),Xr=l("torch.nn.Module"),ea=l(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ta=u(),N=r("div"),v(zt.$$.fragment),oa=u(),me=r("p"),na=l("The "),so=r("a"),sa=l("IBertForMultipleChoice"),ra=l(" forward method, overrides the "),No=r("code"),aa=l("__call__"),ia=l(" special method."),la=u(),v(Se.$$.fragment),da=u(),v(Oe.$$.fragment),Bn=u(),ue=r("h2"),Re=r("a"),Do=r("span"),v(Ft.$$.fragment),ca=u(),Wo=r("span"),pa=l("IBertForTokenClassification"),qn=u(),C=r("div"),v(xt.$$.fragment),ha=u(),Qo=r("p"),ma=l(`I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),ua=u(),Et=r("p"),fa=l("This model inherits from "),ro=r("a"),ga=l("PreTrainedModel"),_a=l(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ba=u(),Ct=r("p"),ka=l("This model is also a PyTorch "),jt=r("a"),va=l("torch.nn.Module"),wa=l(` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),$a=u(),A=r("div"),v(Pt.$$.fragment),Ta=u(),fe=r("p"),ya=l("The "),ao=r("a"),Ia=l("IBertForTokenClassification"),Ba=l(" forward method, overrides the "),Ho=r("code"),qa=l("__call__"),Ma=l(" special method."),za=u(),v(Ne.$$.fragment),Fa=u(),v(De.$$.fragment),xa=u(),v(We.$$.fragment),Mn=u(),ge=r("h2"),Qe=r("a"),Uo=r("span"),v(At.$$.fragment),Ea=u(),Go=r("span"),Ca=l("IBertForQuestionAnswering"),zn=u(),j=r("div"),v(Lt.$$.fragment),ja=u(),_e=r("p"),Pa=l(`I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Vo=r("code"),Aa=l("span start logits"),La=l(" and "),Jo=r("code"),Sa=l("span end logits"),Oa=l(")."),Ra=u(),St=r("p"),Na=l("This model inherits from "),io=r("a"),Da=l("PreTrainedModel"),Wa=l(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Qa=u(),Ot=r("p"),Ha=l("This model is also a PyTorch "),Rt=r("a"),Ua=l("torch.nn.Module"),Ga=l(` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Va=u(),L=r("div"),v(Nt.$$.fragment),Ja=u(),be=r("p"),Ka=l("The "),lo=r("a"),Ya=l("IBertForQuestionAnswering"),Za=l(" forward method, overrides the "),Ko=r("code"),Xa=l("__call__"),ei=l(" special method."),ti=u(),v(He.$$.fragment),oi=u(),v(Ue.$$.fragment),ni=u(),v(Ge.$$.fragment),this.h()},l(t){const _=dl('[data-svelte="svelte-1phssyn"]',document.head);s=a(_,"META",{name:!0,content:!0}),_.forEach(o),g=f(t),c=a(t,"H1",{class:!0});var Dt=i(c);p=a(Dt,"A",{id:!0,class:!0,href:!0});var Yo=i(p);b=a(Yo,"SPAN",{});var Zo=i(b);w(n.$$.fragment,Zo),Zo.forEach(o),Yo.forEach(o),h=f(Dt),q=a(Dt,"SPAN",{});var Xo=i(q);Yn=d(Xo,"I-BERT"),Xo.forEach(o),Dt.forEach(o),pn=f(t),oe=a(t,"H2",{class:!0});var Wt=i(oe);we=a(Wt,"A",{id:!0,class:!0,href:!0});var en=i(we);wo=a(en,"SPAN",{});var tn=i(wo);w(Ke.$$.fragment,tn),tn.forEach(o),en.forEach(o),Zn=f(Wt),$o=a(Wt,"SPAN",{});var on=i($o);Xn=d(on,"Overview"),on.forEach(o),Wt.forEach(o),hn=f(t),$e=a(t,"P",{});var Qt=i($e);es=d(Qt,"The I-BERT model was proposed in "),Ye=a(Qt,"A",{href:!0,rel:!0});var nn=i(Ye);ts=d(nn,"I-BERT: Integer-only BERT Quantization"),nn.forEach(o),os=d(Qt,` by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It\u2019s a quantized version of RoBERTa running inference up to four times faster.`),Qt.forEach(o),mn=f(t),Ut=a(t,"P",{});var sn=i(Ut);ns=d(sn,"The abstract from the paper is the following:"),sn.forEach(o),un=f(t),Gt=a(t,"P",{});var rn=i(Gt);To=a(rn,"EM",{});var an=i(To);ss=d(an,`Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.`),an.forEach(o),rn.forEach(o),fn=f(t),G=a(t,"P",{});var ke=i(G);rs=d(ke,"This model was contributed by "),Ze=a(ke,"A",{href:!0,rel:!0});var ln=i(Ze);as=d(ln,"kssteven"),ln.forEach(o),is=d(ke,". 
The original code can be found "),Xe=a(ke,"A",{href:!0,rel:!0});var dn=i(Xe);ls=d(dn,"here"),dn.forEach(o),ds=d(ke,"."),ke.forEach(o),gn=f(t),ne=a(t,"H2",{class:!0});var Ht=i(ne);Te=a(Ht,"A",{id:!0,class:!0,href:!0});var cn=i(Te);yo=a(cn,"SPAN",{});var si=i(yo);w(et.$$.fragment,si),si.forEach(o),cn.forEach(o),cs=f(Ht),Io=a(Ht,"SPAN",{});var ri=i(Io);ps=d(ri,"IBertConfig"),ri.forEach(o),Ht.forEach(o),_n=f(t),U=a(t,"DIV",{class:!0});var co=i(U);w(tt.$$.fragment,co),hs=f(co),se=a(co,"P",{});var po=i(se);ms=d(po,"This is the configuration class to store the configuration of a "),Vt=a(po,"A",{href:!0});var ai=i(Vt);us=d(ai,"IBertModel"),ai.forEach(o),fs=d(po,`. It is used to instantiate a I-BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the IBERT `),ot=a(po,"A",{href:!0,rel:!0});var ii=i(ot);gs=d(ii,"kssteven/ibert-roberta-base"),ii.forEach(o),_s=d(po," architecture."),po.forEach(o),bs=f(co),re=a(co,"P",{});var ho=i(re);ks=d(ho,"Configuration objects inherit from "),Jt=a(ho,"A",{href:!0});var li=i(Jt);vs=d(li,"PretrainedConfig"),li.forEach(o),ws=d(ho,` and can be used to control the model outputs. Read the documentation from `),Kt=a(ho,"A",{href:!0});var di=i(Kt);$s=d(di,"PretrainedConfig"),di.forEach(o),Ts=d(ho," for more information."),ho.forEach(o),co.forEach(o),bn=f(t),ae=a(t,"H2",{class:!0});var xn=i(ae);ye=a(xn,"A",{id:!0,class:!0,href:!0});var ci=i(ye);Bo=a(ci,"SPAN",{});var pi=i(Bo);w(nt.$$.fragment,pi),pi.forEach(o),ci.forEach(o),ys=f(xn),qo=a(xn,"SPAN",{});var hi=i(qo);Is=d(hi,"IBertModel"),hi.forEach(o),xn.forEach(o),kn=f(t),z=a(t,"DIV",{class:!0});var D=i(z);w(st.$$.fragment,D),Bs=f(D),Mo=a(D,"P",{});var mi=i(Mo);qs=d(mi,"The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top."),mi.forEach(o),Ms=f(D),rt=a(D,"P",{});var En=i(rt);zs=d(En,"This model inherits from "),Yt=a(En,"A",{href:!0});var ui=i(Yt);Fs=d(ui,"PreTrainedModel"),ui.forEach(o),xs=d(En,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),En.forEach(o),Es=f(D),at=a(D,"P",{});var Cn=i(at);Cs=d(Cn,"This model is also a PyTorch "),it=a(Cn,"A",{href:!0,rel:!0});var fi=i(it);js=d(fi,"torch.nn.Module"),fi.forEach(o),Ps=d(Cn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Cn.forEach(o),As=f(D),lt=a(D,"P",{});var jn=i(lt);Ls=d(jn,`The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `),dt=a(jn,"A",{href:!0,rel:!0});var gi=i(dt);Ss=d(gi,`Attention is all you need`),gi.forEach(o),Os=d(jn,` by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.`),jn.forEach(o),Rs=f(D),R=a(D,"DIV",{class:!0});var Ve=i(R);w(ct.$$.fragment,Ve),Ns=f(Ve),ie=a(Ve,"P",{});var mo=i(ie);Ds=d(mo,"The "),Zt=a(mo,"A",{href:!0});var _i=i(Zt);Ws=d(_i,"IBertModel"),_i.forEach(o),Qs=d(mo," forward method, overrides the "),zo=a(mo,"CODE",{});var bi=i(zo);Hs=d(bi,"__call__"),bi.forEach(o),Us=d(mo," special method."),mo.forEach(o),Gs=f(Ve),w(Ie.$$.fragment,Ve),Vs=f(Ve),w(Be.$$.fragment,Ve),Ve.forEach(o),D.forEach(o),vn=f(t),le=a(t,"H2",{class:!0});var Pn=i(le);qe=a(Pn,"A",{id:!0,class:!0,href:!0});var ki=i(qe);Fo=a(ki,"SPAN",{});var vi=i(Fo);w(pt.$$.fragment,vi),vi.forEach(o),ki.forEach(o),Js=f(Pn),xo=a(Pn,"SPAN",{});var wi=i(xo);Ks=d(wi,"IBertForMaskedLM"),wi.forEach(o),Pn.forEach(o),wn=f(t),F=a(t,"DIV",{class:!0});var V=i(F);w(ht.$$.fragment,V),Ys=f(V),mt=a(V,"P",{});var An=i(mt);Zs=d(An,"I-BERT Model with a "),Eo=a(An,"CODE",{});var $i=i(Eo);Xs=d($i,"language modeling"),$i.forEach(o),er=d(An," head on top."),An.forEach(o),tr=f(V),ut=a(V,"P",{});var Ln=i(ut);or=d(Ln,"This model inherits from "),Xt=a(Ln,"A",{href:!0});var Ti=i(Xt);nr=d(Ti,"PreTrainedModel"),Ti.forEach(o),sr=d(Ln,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Ln.forEach(o),rr=f(V),ft=a(V,"P",{});var Sn=i(ft);ar=d(Sn,"This model is also a PyTorch "),gt=a(Sn,"A",{href:!0,rel:!0});var yi=i(gt);ir=d(yi,"torch.nn.Module"),yi.forEach(o),lr=d(Sn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Sn.forEach(o),dr=f(V),P=a(V,"DIV",{class:!0});var J=i(P);w(_t.$$.fragment,J),cr=f(J),de=a(J,"P",{});var uo=i(de);pr=d(uo,"The "),eo=a(uo,"A",{href:!0});var Ii=i(eo);hr=d(Ii,"IBertForMaskedLM"),Ii.forEach(o),mr=d(uo," forward method, overrides the "),Co=a(uo,"CODE",{});var Bi=i(Co);ur=d(Bi,"__call__"),Bi.forEach(o),fr=d(uo," special method."),uo.forEach(o),gr=f(J),w(Me.$$.fragment,J),_r=f(J),w(ze.$$.fragment,J),br=f(J),w(Fe.$$.fragment,J),J.forEach(o),V.forEach(o),$n=f(t),ce=a(t,"H2",{class:!0});var On=i(ce);xe=a(On,"A",{id:!0,class:!0,href:!0});var qi=i(xe);jo=a(qi,"SPAN",{});var Mi=i(jo);w(bt.$$.fragment,Mi),Mi.forEach(o),qi.forEach(o),kr=f(On),Po=a(On,"SPAN",{});var zi=i(Po);vr=d(zi,"IBertForSequenceClassification"),zi.forEach(o),On.forEach(o),Tn=f(t),x=a(t,"DIV",{class:!0});var K=i(x);w(kt.$$.fragment,K),wr=f(K),Ao=a(K,"P",{});var Fi=i(Ao);$r=d(Fi,`I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.`),Fi.forEach(o),Tr=f(K),vt=a(K,"P",{});var Rn=i(vt);yr=d(Rn,"This model inherits from "),to=a(Rn,"A",{href:!0});var xi=i(to);Ir=d(xi,"PreTrainedModel"),xi.forEach(o),Br=d(Rn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Rn.forEach(o),qr=f(K),wt=a(K,"P",{});var Nn=i(wt);Mr=d(Nn,"This model is also a PyTorch "),$t=a(Nn,"A",{href:!0,rel:!0});var Ei=i($t);zr=d(Ei,"torch.nn.Module"),Ei.forEach(o),Fr=d(Nn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Nn.forEach(o),xr=f(K),M=a(K,"DIV",{class:!0});var S=i(M);w(Tt.$$.fragment,S),Er=f(S),pe=a(S,"P",{});var fo=i(pe);Cr=d(fo,"The "),oo=a(fo,"A",{href:!0});var Ci=i(oo);jr=d(Ci,"IBertForSequenceClassification"),Ci.forEach(o),Pr=d(fo," forward method, overrides the "),Lo=a(fo,"CODE",{});var ji=i(Lo);Ar=d(ji,"__call__"),ji.forEach(o),Lr=d(fo," special method."),fo.forEach(o),Sr=f(S),w(Ee.$$.fragment,S),Or=f(S),w(Ce.$$.fragment,S),Rr=f(S),w(je.$$.fragment,S),Nr=f(S),w(Pe.$$.fragment,S),Dr=f(S),w(Ae.$$.fragment,S),S.forEach(o),K.forEach(o),yn=f(t),he=a(t,"H2",{class:!0});var Dn=i(he);Le=a(Dn,"A",{id:!0,class:!0,href:!0});var Pi=i(Le);So=a(Pi,"SPAN",{});var Ai=i(So);w(yt.$$.fragment,Ai),Ai.forEach(o),Pi.forEach(o),Wr=f(Dn),Oo=a(Dn,"SPAN",{});var Li=i(Oo);Qr=d(Li,"IBertForMultipleChoice"),Li.forEach(o),Dn.forEach(o),In=f(t),E=a(t,"DIV",{class:!0});var Y=i(E);w(It.$$.fragment,Y),Hr=f(Y),Ro=a(Y,"P",{});var Si=i(Ro);Ur=d(Si,`I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.`),Si.forEach(o),Gr=f(Y),Bt=a(Y,"P",{});var Wn=i(Bt);Vr=d(Wn,"This model inherits from "),no=a(Wn,"A",{href:!0});var Oi=i(no);Jr=d(Oi,"PreTrainedModel"),Oi.forEach(o),Kr=d(Wn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Wn.forEach(o),Yr=f(Y),qt=a(Y,"P",{});var Qn=i(qt);Zr=d(Qn,"This model is also a PyTorch "),Mt=a(Qn,"A",{href:!0,rel:!0});var Ri=i(Mt);Xr=d(Ri,"torch.nn.Module"),Ri.forEach(o),ea=d(Qn,` subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Qn.forEach(o),ta=f(Y),N=a(Y,"DIV",{class:!0});var Je=i(N);w(zt.$$.fragment,Je),oa=f(Je),me=a(Je,"P",{});var go=i(me);na=d(go,"The "),so=a(go,"A",{href:!0});var Ni=i(so);sa=d(Ni,"IBertForMultipleChoice"),Ni.forEach(o),ra=d(go," forward method, overrides the "),No=a(go,"CODE",{});var Di=i(No);aa=d(Di,"__call__"),Di.forEach(o),ia=d(go," special method."),go.forEach(o),la=f(Je),w(Se.$$.fragment,Je),da=f(Je),w(Oe.$$.fragment,Je),Je.forEach(o),Y.forEach(o),Bn=f(t),ue=a(t,"H2",{class:!0});var Hn=i(ue);Re=a(Hn,"A",{id:!0,class:!0,href:!0});var Wi=i(Re);Do=a(Wi,"SPAN",{});var Qi=i(Do);w(Ft.$$.fragment,Qi),Qi.forEach(o),Wi.forEach(o),ca=f(Hn),Wo=a(Hn,"SPAN",{});var Hi=i(Wo);pa=d(Hi,"IBertForTokenClassification"),Hi.forEach(o),Hn.forEach(o),qn=f(t),C=a(t,"DIV",{class:!0});var Z=i(C);w(xt.$$.fragment,Z),ha=f(Z),Qo=a(Z,"P",{});var Ui=i(Qo);ma=d(Ui,`I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.`),Ui.forEach(o),ua=f(Z),Et=a(Z,"P",{});var Un=i(Et);fa=d(Un,"This model inherits from "),ro=a(Un,"A",{href:!0});var Gi=i(ro);ga=d(Gi,"PreTrainedModel"),Gi.forEach(o),_a=d(Un,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Un.forEach(o),ba=f(Z),Ct=a(Z,"P",{});var Gn=i(Ct);ka=d(Gn,"This model is also a PyTorch "),jt=a(Gn,"A",{href:!0,rel:!0});var Vi=i(jt);va=d(Vi,"torch.nn.Module"),Vi.forEach(o),wa=d(Gn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Gn.forEach(o),$a=f(Z),A=a(Z,"DIV",{class:!0});var X=i(A);w(Pt.$$.fragment,X),Ta=f(X),fe=a(X,"P",{});var _o=i(fe);ya=d(_o,"The "),ao=a(_o,"A",{href:!0});var Ji=i(ao);Ia=d(Ji,"IBertForTokenClassification"),Ji.forEach(o),Ba=d(_o," forward method, overrides the "),Ho=a(_o,"CODE",{});var Ki=i(Ho);qa=d(Ki,"__call__"),Ki.forEach(o),Ma=d(_o," special method."),_o.forEach(o),za=f(X),w(Ne.$$.fragment,X),Fa=f(X),w(De.$$.fragment,X),xa=f(X),w(We.$$.fragment,X),X.forEach(o),Z.forEach(o),Mn=f(t),ge=a(t,"H2",{class:!0});var Vn=i(ge);Qe=a(Vn,"A",{id:!0,class:!0,href:!0});var Yi=i(Qe);Uo=a(Yi,"SPAN",{});var Zi=i(Uo);w(At.$$.fragment,Zi),Zi.forEach(o),Yi.forEach(o),Ea=f(Vn),Go=a(Vn,"SPAN",{});var Xi=i(Go);Ca=d(Xi,"IBertForQuestionAnswering"),Xi.forEach(o),Vn.forEach(o),zn=f(t),j=a(t,"DIV",{class:!0});var ee=i(j);w(Lt.$$.fragment,ee),ja=f(ee),_e=a(ee,"P",{});var bo=i(_e);Pa=d(bo,`I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `),Vo=a(bo,"CODE",{});var el=i(Vo);Aa=d(el,"span start logits"),el.forEach(o),La=d(bo," and "),Jo=a(bo,"CODE",{});var tl=i(Jo);Sa=d(tl,"span end logits"),tl.forEach(o),Oa=d(bo,")."),bo.forEach(o),Ra=f(ee),St=a(ee,"P",{});var Jn=i(St);Na=d(Jn,"This model inherits from "),io=a(Jn,"A",{href:!0});var ol=i(io);Da=d(ol,"PreTrainedModel"),ol.forEach(o),Wa=d(Jn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Jn.forEach(o),Qa=f(ee),Ot=a(ee,"P",{});var Kn=i(Ot);Ha=d(Kn,"This model is also a PyTorch "),Rt=a(Kn,"A",{href:!0,rel:!0});var nl=i(Rt);Ua=d(nl,"torch.nn.Module"),nl.forEach(o),Ga=d(Kn,` subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Kn.forEach(o),Va=f(ee),L=a(ee,"DIV",{class:!0});var te=i(L);w(Nt.$$.fragment,te),Ja=f(te),be=a(te,"P",{});var ko=i(be);Ka=d(ko,"The "),lo=a(ko,"A",{href:!0});var sl=i(lo);Ya=d(sl,"IBertForQuestionAnswering"),sl.forEach(o),Za=d(ko," forward method, overrides the "),Ko=a(ko,"CODE",{});var rl=i(Ko);Xa=d(rl,"__call__"),rl.forEach(o),ei=d(ko," special method."),ko.forEach(o),ti=f(te),w(He.$$.fragment,te),oi=f(te),w(Ue.$$.fragment,te),ni=f(te),w(Ge.$$.fragment,te),te.forEach(o),ee.forEach(o),this.h()},h(){m(s,"name","hf:doc:metadata"),m(s,"content",JSON.stringify(Fl)),m(p,"id","ibert"),m(p,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(p,"href","#ibert"),m(c,"class","relative group"),m(we,"id","overview"),m(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(we,"href","#overview"),m(oe,"class","relative group"),m(Ye,"href","https://arxiv.org/abs/2101.01321"),m(Ye,"rel","nofollow"),m(Ze,"href","https://huggingface.co/kssteven"),m(Ze,"rel","nofollow"),m(Xe,"href","https://github.com/kssteven418/I-BERT"),m(Xe,"rel","nofollow"),m(Te,"id","transformers.IBertConfig"),m(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Te,"href","#transformers.IBertConfig"),m(ne,"class","relative group"),m(Vt,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertModel"),m(ot,"href","https://huggingface.co/kssteven/ibert-roberta-base"),m(ot,"rel","nofollow"),m(Jt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(Kt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ye,"id","transformers.IBertModel"),m(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ye,"href","#transformers.IBertModel"),m(ae,"class","relative group"),m(Yt,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(it,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(it,"rel","nofollow"),m(dt,"href","https://arxiv.org/abs/1706.03762"),m(dt,"rel","nofollow"),m(Zt,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertModel"),m(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(qe,"id","transformers.IBertForMaskedLM"),m(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(qe,"href","#transformers.IBertForMaskedLM"),m(le,"class","relative 
group"),m(Xt,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(gt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(gt,"rel","nofollow"),m(eo,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertForMaskedLM"),m(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(xe,"id","transformers.IBertForSequenceClassification"),m(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(xe,"href","#transformers.IBertForSequenceClassification"),m(ce,"class","relative group"),m(to,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m($t,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m($t,"rel","nofollow"),m(oo,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertForSequenceClassification"),m(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Le,"id","transformers.IBertForMultipleChoice"),m(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Le,"href","#transformers.IBertForMultipleChoice"),m(he,"class","relative group"),m(no,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(Mt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(Mt,"rel","nofollow"),m(so,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertForMultipleChoice"),m(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Re,"id","transformers.IBertForTokenClassification"),m(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Re,"href","#transformers.IBertForTokenClassification"),m(ue,"class","relative group"),m(ro,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(jt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(jt,"rel","nofollow"),m(ao,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertForTokenClassification"),m(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Qe,"id","transformers.IBertForQuestionAnswering"),m(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Qe,"href","#transformers.IBertForQuestionAnswering"),m(ge,"class","relative group"),m(io,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(Rt,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),m(Rt,"rel","nofollow"),m(lo,"href","/docs/transformers/pr_19429/en/model_doc/ibert#transformers.IBertForQuestionAnswering"),m(L,"class","docstring border-l-2 
border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(t,_){e(document.head,s),k(t,g,_),k(t,c,_),e(c,p),e(p,b),$(n,b,null),e(c,h),e(c,q),e(q,Yn),k(t,pn,_),k(t,oe,_),e(oe,we),e(we,wo),$(Ke,wo,null),e(oe,Zn),e(oe,$o),e($o,Xn),k(t,hn,_),k(t,$e,_),e($e,es),e($e,Ye),e(Ye,ts),e($e,os),k(t,mn,_),k(t,Ut,_),e(Ut,ns),k(t,un,_),k(t,Gt,_),e(Gt,To),e(To,ss),k(t,fn,_),k(t,G,_),e(G,rs),e(G,Ze),e(Ze,as),e(G,is),e(G,Xe),e(Xe,ls),e(G,ds),k(t,gn,_),k(t,ne,_),e(ne,Te),e(Te,yo),$(et,yo,null),e(ne,cs),e(ne,Io),e(Io,ps),k(t,_n,_),k(t,U,_),$(tt,U,null),e(U,hs),e(U,se),e(se,ms),e(se,Vt),e(Vt,us),e(se,fs),e(se,ot),e(ot,gs),e(se,_s),e(U,bs),e(U,re),e(re,ks),e(re,Jt),e(Jt,vs),e(re,ws),e(re,Kt),e(Kt,$s),e(re,Ts),k(t,bn,_),k(t,ae,_),e(ae,ye),e(ye,Bo),$(nt,Bo,null),e(ae,ys),e(ae,qo),e(qo,Is),k(t,kn,_),k(t,z,_),$(st,z,null),e(z,Bs),e(z,Mo),e(Mo,qs),e(z,Ms),e(z,rt),e(rt,zs),e(rt,Yt),e(Yt,Fs),e(rt,xs),e(z,Es),e(z,at),e(at,Cs),e(at,it),e(it,js),e(at,Ps),e(z,As),e(z,lt),e(lt,Ls),e(lt,dt),e(dt,Ss),e(lt,Os),e(z,Rs),e(z,R),$(ct,R,null),e(R,Ns),e(R,ie),e(ie,Ds),e(ie,Zt),e(Zt,Ws),e(ie,Qs),e(ie,zo),e(zo,Hs),e(ie,Us),e(R,Gs),$(Ie,R,null),e(R,Vs),$(Be,R,null),k(t,vn,_),k(t,le,_),e(le,qe),e(qe,Fo),$(pt,Fo,null),e(le,Js),e(le,xo),e(xo,Ks),k(t,wn,_),k(t,F,_),$(ht,F,null),e(F,Ys),e(F,mt),e(mt,Zs),e(mt,Eo),e(Eo,Xs),e(mt,er),e(F,tr),e(F,ut),e(ut,or),e(ut,Xt),e(Xt,nr),e(ut,sr),e(F,rr),e(F,ft),e(ft,ar),e(ft,gt),e(gt,ir),e(ft,lr),e(F,dr),e(F,P),$(_t,P,null),e(P,cr),e(P,de),e(de,pr),e(de,eo),e(eo,hr),e(de,mr),e(de,Co),e(Co,ur),e(de,fr),e(P,gr),$(Me,P,null),e(P,_r),$(ze,P,null),e(P,br),$(Fe,P,null),k(t,$n,_),k(t,ce,_),e(ce,xe),e(xe,jo),$(bt,jo,null),e(ce,kr),e(ce,Po),e(Po,vr),k(t,Tn,_),k(t,x,_),$(kt,x,null),e(x,wr),e(x,Ao),e(Ao,$r),e(x,Tr),e(x,vt),e(vt,yr),e(vt,to),e(to,Ir),e(vt,Br),e(x,qr),e(x,wt),e(wt,Mr),e(wt,$t),e($t,zr),e(wt,Fr),e(x,xr),e(x,M),$(Tt,M,null),e(M,Er),e(M,pe),e(pe,Cr),e(pe,oo),e(oo,jr),e(pe,Pr),e(pe,Lo),e(Lo,Ar),e(pe,Lr),e(M,Sr),$(Ee,M,null),e(M,Or),$(Ce,M,null),e(M,Rr),$(je,M,null),e(M,Nr),$(Pe,M,null),e(M,Dr),$(Ae,M,null),k(t,yn,_),k(t,he,_),e(he,Le),e(Le,So),$(yt,So,null),e(he,Wr),e(he,Oo),e(Oo,Qr),k(t,In,_),k(t,E,_),$(It,E,null),e(E,Hr),e(E,Ro),e(Ro,Ur),e(E,Gr),e(E,Bt),e(Bt,Vr),e(Bt,no),e(no,Jr),e(Bt,Kr),e(E,Yr),e(E,qt),e(qt,Zr),e(qt,Mt),e(Mt,Xr),e(qt,ea),e(E,ta),e(E,N),$(zt,N,null),e(N,oa),e(N,me),e(me,na),e(me,so),e(so,sa),e(me,ra),e(me,No),e(No,aa),e(me,ia),e(N,la),$(Se,N,null),e(N,da),$(Oe,N,null),k(t,Bn,_),k(t,ue,_),e(ue,Re),e(Re,Do),$(Ft,Do,null),e(ue,ca),e(ue,Wo),e(Wo,pa),k(t,qn,_),k(t,C,_),$(xt,C,null),e(C,ha),e(C,Qo),e(Qo,ma),e(C,ua),e(C,Et),e(Et,fa),e(Et,ro),e(ro,ga),e(Et,_a),e(C,ba),e(C,Ct),e(Ct,ka),e(Ct,jt),e(jt,va),e(Ct,wa),e(C,$a),e(C,A),$(Pt,A,null),e(A,Ta),e(A,fe),e(fe,ya),e(fe,ao),e(ao,Ia),e(fe,Ba),e(fe,Ho),e(Ho,qa),e(fe,Ma),e(A,za),$(Ne,A,null),e(A,Fa),$(De,A,null),e(A,xa),$(We,A,null),k(t,Mn,_),k(t,ge,_),e(ge,Qe),e(Qe,Uo),$(At,Uo,null),e(ge,Ea),e(ge,Go),e(Go,Ca),k(t,zn,_),k(t,j,_),$(Lt,j,null),e(j,ja),e(j,_e),e(_e,Pa),e(_e,Vo),e(Vo,Aa),e(_e,La),e(_e,Jo),e(Jo,Sa),e(_e,Oa),e(j,Ra),e(j,St),e(St,Na),e(St,io),e(io,Da),e(St,Wa),e(j,Qa),e(j,Ot),e(Ot,Ha),e(Ot,Rt),e(Rt,Ua),e(Ot,Ga),e(j,Va),e(j,L),$(Nt,L,null),e(L,Ja),e(L,be),e(be,Ka),e(be,lo),e(lo,Ya),e(be,Za),e(be,Ko),e(Ko,Xa),e(be,ei),e(L,ti),$(He,L,null),e(L,oi),$(Ue,L,null),e(L,ni),$(Ge,L,null),Fn=!0},p(t,[_]){const Dt={};_&2&&(Dt.$$scope={dirty:_,ctx:t}),Ie.$set(Dt);const Yo={};_&2&&(Yo.$$scope={dirty:_,ctx:t}),Be.$set(Yo);const 
Zo={};_&2&&(Zo.$$scope={dirty:_,ctx:t}),Me.$set(Zo);const Xo={};_&2&&(Xo.$$scope={dirty:_,ctx:t}),ze.$set(Xo);const Wt={};_&2&&(Wt.$$scope={dirty:_,ctx:t}),Fe.$set(Wt);const en={};_&2&&(en.$$scope={dirty:_,ctx:t}),Ee.$set(en);const tn={};_&2&&(tn.$$scope={dirty:_,ctx:t}),Ce.$set(tn);const on={};_&2&&(on.$$scope={dirty:_,ctx:t}),je.$set(on);const Qt={};_&2&&(Qt.$$scope={dirty:_,ctx:t}),Pe.$set(Qt);const nn={};_&2&&(nn.$$scope={dirty:_,ctx:t}),Ae.$set(nn);const sn={};_&2&&(sn.$$scope={dirty:_,ctx:t}),Se.$set(sn);const rn={};_&2&&(rn.$$scope={dirty:_,ctx:t}),Oe.$set(rn);const an={};_&2&&(an.$$scope={dirty:_,ctx:t}),Ne.$set(an);const ke={};_&2&&(ke.$$scope={dirty:_,ctx:t}),De.$set(ke);const ln={};_&2&&(ln.$$scope={dirty:_,ctx:t}),We.$set(ln);const dn={};_&2&&(dn.$$scope={dirty:_,ctx:t}),He.$set(dn);const Ht={};_&2&&(Ht.$$scope={dirty:_,ctx:t}),Ue.$set(Ht);const cn={};_&2&&(cn.$$scope={dirty:_,ctx:t}),Ge.$set(cn)},i(t){Fn||(T(n.$$.fragment,t),T(Ke.$$.fragment,t),T(et.$$.fragment,t),T(tt.$$.fragment,t),T(nt.$$.fragment,t),T(st.$$.fragment,t),T(ct.$$.fragment,t),T(Ie.$$.fragment,t),T(Be.$$.fragment,t),T(pt.$$.fragment,t),T(ht.$$.fragment,t),T(_t.$$.fragment,t),T(Me.$$.fragment,t),T(ze.$$.fragment,t),T(Fe.$$.fragment,t),T(bt.$$.fragment,t),T(kt.$$.fragment,t),T(Tt.$$.fragment,t),T(Ee.$$.fragment,t),T(Ce.$$.fragment,t),T(je.$$.fragment,t),T(Pe.$$.fragment,t),T(Ae.$$.fragment,t),T(yt.$$.fragment,t),T(It.$$.fragment,t),T(zt.$$.fragment,t),T(Se.$$.fragment,t),T(Oe.$$.fragment,t),T(Ft.$$.fragment,t),T(xt.$$.fragment,t),T(Pt.$$.fragment,t),T(Ne.$$.fragment,t),T(De.$$.fragment,t),T(We.$$.fragment,t),T(At.$$.fragment,t),T(Lt.$$.fragment,t),T(Nt.$$.fragment,t),T(He.$$.fragment,t),T(Ue.$$.fragment,t),T(Ge.$$.fragment,t),Fn=!0)},o(t){y(n.$$.fragment,t),y(Ke.$$.fragment,t),y(et.$$.fragment,t),y(tt.$$.fragment,t),y(nt.$$.fragment,t),y(st.$$.fragment,t),y(ct.$$.fragment,t),y(Ie.$$.fragment,t),y(Be.$$.fragment,t),y(pt.$$.fragment,t),y(ht.$$.fragment,t),y(_t.$$.fragment,t),y(Me.$$.fragment,t),y(ze.$$.fragment,t),y(Fe.$$.fragment,t),y(bt.$$.fragment,t),y(kt.$$.fragment,t),y(Tt.$$.fragment,t),y(Ee.$$.fragment,t),y(Ce.$$.fragment,t),y(je.$$.fragment,t),y(Pe.$$.fragment,t),y(Ae.$$.fragment,t),y(yt.$$.fragment,t),y(It.$$.fragment,t),y(zt.$$.fragment,t),y(Se.$$.fragment,t),y(Oe.$$.fragment,t),y(Ft.$$.fragment,t),y(xt.$$.fragment,t),y(Pt.$$.fragment,t),y(Ne.$$.fragment,t),y(De.$$.fragment,t),y(We.$$.fragment,t),y(At.$$.fragment,t),y(Lt.$$.fragment,t),y(Nt.$$.fragment,t),y(He.$$.fragment,t),y(Ue.$$.fragment,t),y(Ge.$$.fragment,t),Fn=!1},d(t){o(s),t&&o(g),t&&o(c),I(n),t&&o(pn),t&&o(oe),I(Ke),t&&o(hn),t&&o($e),t&&o(mn),t&&o(Ut),t&&o(un),t&&o(Gt),t&&o(fn),t&&o(G),t&&o(gn),t&&o(ne),I(et),t&&o(_n),t&&o(U),I(tt),t&&o(bn),t&&o(ae),I(nt),t&&o(kn),t&&o(z),I(st),I(ct),I(Ie),I(Be),t&&o(vn),t&&o(le),I(pt),t&&o(wn),t&&o(F),I(ht),I(_t),I(Me),I(ze),I(Fe),t&&o($n),t&&o(ce),I(bt),t&&o(Tn),t&&o(x),I(kt),I(Tt),I(Ee),I(Ce),I(je),I(Pe),I(Ae),t&&o(yn),t&&o(he),I(yt),t&&o(In),t&&o(E),I(It),I(zt),I(Se),I(Oe),t&&o(Bn),t&&o(ue),I(Ft),t&&o(qn),t&&o(C),I(xt),I(Pt),I(Ne),I(De),I(We),t&&o(Mn),t&&o(ge),I(At),t&&o(zn),t&&o(j),I(Lt),I(Nt),I(He),I(Ue),I(Ge)}}}const 
Fl={local:"ibert",sections:[{local:"overview",title:"Overview"},{local:"transformers.IBertConfig",title:"IBertConfig"},{local:"transformers.IBertModel",title:"IBertModel"},{local:"transformers.IBertForMaskedLM",title:"IBertForMaskedLM"},{local:"transformers.IBertForSequenceClassification",title:"IBertForSequenceClassification"},{local:"transformers.IBertForMultipleChoice",title:"IBertForMultipleChoice"},{local:"transformers.IBertForTokenClassification",title:"IBertForTokenClassification"},{local:"transformers.IBertForQuestionAnswering",title:"IBertForQuestionAnswering"}],title:"I-BERT"};function xl(B){return cl(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Sl extends al{constructor(s){super();il(this,s,xl,zl,ll,{})}}export{Sl as default,Fl as metadata};
7
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/hubert.mdx-hf-doc-builder.js
import{S as yl,i as wl,s as Tl,e as a,k as u,w,t as s,M as kl,c as r,d as t,m as h,a as l,x as T,h as n,b as f,G as e,g,y as k,q as $,o as C,B as H,v as $l,L as Kt}from"../../chunks/vendor-hf-doc-builder.js";import{T as Do}from"../../chunks/Tip-hf-doc-builder.js";import{D as Xe}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Yt}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Vt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Ut}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Cl(F){let d,v,c,m,y;return m=new Yt({props:{code:`from transformers import HubertModel, HubertConfig # Initializing a Hubert facebook/hubert-base-ls960 style configuration configuration = HubertConfig() # Initializing a model from the facebook/hubert-base-ls960 style configuration model = HubertModel(configuration) # Accessing the model configuration configuration = model.config`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> HubertModel, HubertConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a Hubert facebook/hubert-base-ls960 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = HubertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Initializing a model from the facebook/hubert-base-ls960 style configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertModel(configuration) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Accessing the model configuration</span> <span class="hljs-meta">&gt;&gt;&gt; </span>configuration = model.config`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function Hl(F){let d,v,c,m,y;return{c(){d=a("p"),v=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a("code"),m=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r(p,"CODE",{});var x=l(c);m=n(x,"Module"),x.forEach(t),y=n(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(t)},m(i,p){g(i,d,p),e(d,v),e(d,c),e(c,m),e(d,y)},d(i){i&&t(d)}}}function El(F){let d,v,c,m,y;return m=new Yt({props:{code:`from transformers import Wav2Vec2Processor, HubertModel from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft") model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, HubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertModel.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function jl(F){let d,v,c,m,y;return{c(){d=a("p"),v=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a("code"),m=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r(p,"CODE",{});var x=l(c);m=n(x,"Module"),x.forEach(t),y=n(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(t)},m(i,p){g(i,d,p),e(d,v),e(d,c),e(c,m),e(d,y)},d(i){i&&t(d)}}}function Fl(F){let d,v,c,m,y;return m=new Yt({props:{code:`from transformers import Wav2Vec2Processor, HubertForCTC from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft") model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") # audio file is decoded on the fly 
inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) # transcribe speech transcription = processor.batch_decode(predicted_ids) transcription[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, HubertForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-large-ls960-ft&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = processor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... 
</span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># transcribe speech</span> <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.batch_decode(predicted_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription[<span class="hljs-number">0</span>] <span class="hljs-string">&#x27;MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL&#x27;</span>`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function xl(F){let d,v;return d=new Yt({props:{code:`inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids # compute loss loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = processor(text=dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>], return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">22.68</span>`}}),{c(){w(d.$$.fragment)},l(c){T(d.$$.fragment,c)},m(c,m){k(d,c,m),v=!0},p:Kt,i(c){v||($(d.$$.fragment,c),v=!0)},o(c){C(d.$$.fragment,c),v=!1},d(c){H(d,c)}}}function ql(F){let d,v,c,m,y;return{c(){d=a("p"),v=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a("code"),m=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r(p,"CODE",{});var x=l(c);m=n(x,"Module"),x.forEach(t),y=n(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(t)},m(i,p){g(i,d,p),e(d,v),e(d,c),e(c,m),e(d,y)},d(i){i&&t(d)}}}function Ml(F){let d,v,c,m,y;return m=new Yt({props:{code:`from transformers import Wav2Vec2FeatureExtractor, HubertForSequenceClassification from datasets import load_dataset import torch dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") dataset = dataset.sort("id") sampling_rate = dataset.features["audio"].sampling_rate feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-base-superb-ks") model = HubertForSequenceClassification.from_pretrained("superb/hubert-base-superb-ks") # audio file is decoded on the fly inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits predicted_class_ids = torch.argmax(logits, dim=-1).item() predicted_label = model.config.id2label[predicted_class_ids] predicted_label`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor, HubertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.sort(<span class="hljs-string">&quot;id&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sampling_rate = dataset.features[<span class="hljs-string">&quot;audio&quot;</span>].sampling_rate <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;superb/hubert-base-superb-ks&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = HubertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;superb/hubert-base-superb-ks&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># audio file is decoded on the fly</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = feature_extractor(dataset[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=sampling_rate, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits = model(**inputs).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_ids = torch.argmax(logits, dim=-<span class="hljs-number">1</span>).item() <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label = model.config.id2label[predicted_class_ids] <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_label <span class="hljs-string">&#x27;_unknown_&#x27;</span>`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function Pl(F){let d,v;return d=new Yt({props:{code:`# compute loss - target_label is e.g. "down" target_label = model.config.id2label[0] inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) loss = model(**inputs).loss round(loss.item(), 2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss - target_label is e.g. 
&quot;down&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_label = model.config.id2label[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>inputs[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor([model.config.label2id[target_label]]) <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(**inputs).loss <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">round</span>(loss.item(), <span class="hljs-number">2</span>) <span class="hljs-number">8.53</span>`}}),{c(){w(d.$$.fragment)},l(c){T(d.$$.fragment,c)},m(c,m){k(d,c,m),v=!0},p:Kt,i(c){v||($(d.$$.fragment,c),v=!0)},o(c){C(d.$$.fragment,c),v=!1},d(c){H(d,c)}}}function Sl(F){let d,v,c,m,y,i,p,x,De,ve,D,J,ee,E,ze,U,Ae,ye,L,Ie,te,oe,Le,we,B,We,Te,R,ge,Ne,he,M,z,ke,W,_e,Be,se,ne,Re,S,Ve,ae,$e,Q,re,le,Ue,K,Ce,A,Z,ie,P,Ke,I,Ye,He;return{c(){d=a("p"),v=s("TensorFlow models and layers in "),c=a("code"),m=s("transformers"),y=s(" accept two formats as input:"),i=u(),p=a("ul"),x=a("li"),De=s("having all inputs as keyword arguments (like PyTorch models), or"),ve=u(),D=a("li"),J=s("having all inputs as a list, tuple or dict in the first positional argument."),ee=u(),E=a("p"),ze=s(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),U=a("code"),Ae=s("model.fit()"),ye=s(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=a("code"),Ie=s("model.fit()"),te=s(` supports! If, however, you want to use the second format outside of Keras methods like `),oe=a("code"),Le=s("fit()"),we=s(" and "),B=a("code"),We=s("predict()"),Te=s(`, such as when creating your own layers or models with the Keras `),R=a("code"),ge=s("Functional"),Ne=s(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),he=u(),M=a("ul"),z=a("li"),ke=s("a single Tensor with "),W=a("code"),_e=s("input_values"),Be=s(" only and nothing else: "),se=a("code"),ne=s("model(input_values)"),Re=u(),S=a("li"),Ve=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=a("code"),$e=s("model([input_values, attention_mask])"),Q=s(" or "),re=a("code"),le=s("model([input_values, attention_mask, token_type_ids])"),Ue=u(),K=a("li"),Ce=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),Z=s('model({"input_values": input_values, "token_type_ids": token_type_ids})'),ie=u(),P=a("p"),Ke=s(`Note that when creating models and layers with `),I=a("a"),Ye=s("subclassing"),He=s(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(b){d=r(b,"P",{});var j=l(d);v=n(j,"TensorFlow models and layers in "),c=r(j,"CODE",{});var ot=l(c);m=n(ot,"transformers"),ot.forEach(t),y=n(j," accept two formats as input:"),j.forEach(t),i=h(b),p=r(b,"UL",{});var N=l(p);x=r(N,"LI",{});var st=l(x);De=n(st,"having all inputs as keyword arguments (like PyTorch models), or"),st.forEach(t),ve=h(N),D=r(N,"LI",{});var Ee=l(D);J=n(Ee,"having all inputs as a list, tuple or dict in the first positional argument."),Ee.forEach(t),N.forEach(t),ee=h(b),E=r(b,"P",{});var q=l(E);ze=n(q,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `),U=r(q,"CODE",{});var nt=l(U);Ae=n(nt,"model.fit()"),nt.forEach(t),ye=n(q,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=r(q,"CODE",{});var be=l(L);Ie=n(be,"model.fit()"),be.forEach(t),te=n(q,` supports! If, however, you want to use the second format outside of Keras methods like `),oe=r(q,"CODE",{});var at=l(oe);Le=n(at,"fit()"),at.forEach(t),we=n(q," and "),B=r(q,"CODE",{});var rt=l(B);We=n(rt,"predict()"),rt.forEach(t),Te=n(q,`, such as when creating your own layers or models with the Keras `),R=r(q,"CODE",{});var lt=l(R);ge=n(lt,"Functional"),lt.forEach(t),Ne=n(q,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),q.forEach(t),he=h(b),M=r(b,"UL",{});var O=l(M);z=r(O,"LI",{});var de=l(z);ke=n(de,"a single Tensor with "),W=r(de,"CODE",{});var je=l(W);_e=n(je,"input_values"),je.forEach(t),Be=n(de," only and nothing else: "),se=r(de,"CODE",{});var it=l(se);ne=n(it,"model(input_values)"),it.forEach(t),de.forEach(t),Re=h(O),S=r(O,"LI",{});var ce=l(S);Ve=n(ce,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=r(ce,"CODE",{});var Fe=l(ae);$e=n(Fe,"model([input_values, attention_mask])"),Fe.forEach(t),Q=n(ce," or "),re=r(ce,"CODE",{});var dt=l(re);le=n(dt,"model([input_values, attention_mask, token_type_ids])"),dt.forEach(t),ce.forEach(t),Ue=h(O),K=r(O,"LI",{});var Ge=l(K);Ce=n(Ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(Ge,"CODE",{});var ct=l(A);Z=n(ct,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),ct.forEach(t),Ge.forEach(t),O.forEach(t),ie=h(b),P=r(b,"P",{});var V=l(P);Ke=n(V,`Note that when creating models and layers with `),I=r(V,"A",{href:!0,rel:!0});var Je=l(I);Ye=n(Je,"subclassing"),Je.forEach(t),He=n(V,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),V.forEach(t),this.h()},h(){f(I,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),f(I,"rel","nofollow")},m(b,j){g(b,d,j),e(d,v),e(d,c),e(c,m),e(d,y),g(b,i,j),g(b,p,j),e(p,x),e(x,De),e(p,ve),e(p,D),e(D,J),g(b,ee,j),g(b,E,j),e(E,ze),e(E,U),e(U,Ae),e(E,ye),e(E,L),e(L,Ie),e(E,te),e(E,oe),e(oe,Le),e(E,we),e(E,B),e(B,We),e(E,Te),e(E,R),e(R,ge),e(E,Ne),g(b,he,j),g(b,M,j),e(M,z),e(z,ke),e(z,W),e(W,_e),e(z,Be),e(z,se),e(se,ne),e(M,Re),e(M,S),e(S,Ve),e(S,ae),e(ae,$e),e(S,Q),e(S,re),e(re,le),e(M,Ue),e(M,K),e(K,Ce),e(K,A),e(A,Z),g(b,ie,j),g(b,P,j),e(P,Ke),e(P,I),e(I,Ye),e(P,He)},d(b){b&&t(d),b&&t(i),b&&t(p),b&&t(ee),b&&t(E),b&&t(he),b&&t(M),b&&t(ie),b&&t(P)}}}function Ol(F){let d,v,c,m,y;return{c(){d=a("p"),v=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a("code"),m=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r(p,"CODE",{});var x=l(c);m=n(x,"Module"),x.forEach(t),y=n(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(t)},m(i,p){g(i,d,p),e(d,v),e(d,c),e(c,m),e(d,y)},d(i){i&&t(d)}}}function 
Dl(F){let d,v,c,m,y;return m=new Yt({props:{code:`from transformers import Wav2Vec2Processor, TFHubertModel from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") model = TFHubertModel.from_pretrained("facebook/hubert-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 hidden_states = model(input_values).last_hidden_state`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFHubertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFHubertModel.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>hidden_states = model(input_values).last_hidden_state`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function zl(F){let d,v,c,m,y,i,p,x,De,ve,D,J,ee,E,ze,U,Ae,ye,L,Ie,te,oe,Le,we,B,We,Te,R,ge,Ne,he,M,z,ke,W,_e,Be,se,ne,Re,S,Ve,ae,$e,Q,re,le,Ue,K,Ce,A,Z,ie,P,Ke,I,Ye,He;return{c(){d=a("p"),v=s("TensorFlow models and layers in "),c=a("code"),m=s("transformers"),y=s(" accept two formats as input:"),i=u(),p=a("ul"),x=a("li"),De=s("having all inputs as keyword arguments (like PyTorch models), or"),ve=u(),D=a("li"),J=s("having all inputs as a list, tuple or dict in the first positional argument."),ee=u(),E=a("p"),ze=s(`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),U=a("code"),Ae=s("model.fit()"),ye=s(` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=a("code"),Ie=s("model.fit()"),te=s(` supports! 
If, however, you want to use the second format outside of Keras methods like `),oe=a("code"),Le=s("fit()"),we=s(" and "),B=a("code"),We=s("predict()"),Te=s(`, such as when creating your own layers or models with the Keras `),R=a("code"),ge=s("Functional"),Ne=s(` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),he=u(),M=a("ul"),z=a("li"),ke=s("a single Tensor with "),W=a("code"),_e=s("input_values"),Be=s(" only and nothing else: "),se=a("code"),ne=s("model(input_values)"),Re=u(),S=a("li"),Ve=s(`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=a("code"),$e=s("model([input_values, attention_mask])"),Q=s(" or "),re=a("code"),le=s("model([input_values, attention_mask, token_type_ids])"),Ue=u(),K=a("li"),Ce=s(`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=a("code"),Z=s('model({"input_values": input_values, "token_type_ids": token_type_ids})'),ie=u(),P=a("p"),Ke=s(`Note that when creating models and layers with `),I=a("a"),Ye=s("subclassing"),He=s(` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),this.h()},l(b){d=r(b,"P",{});var j=l(d);v=n(j,"TensorFlow models and layers in "),c=r(j,"CODE",{});var ot=l(c);m=n(ot,"transformers"),ot.forEach(t),y=n(j," accept two formats as input:"),j.forEach(t),i=h(b),p=r(b,"UL",{});var N=l(p);x=r(N,"LI",{});var st=l(x);De=n(st,"having all inputs as keyword arguments (like PyTorch models), or"),st.forEach(t),ve=h(N),D=r(N,"LI",{});var Ee=l(D);J=n(Ee,"having all inputs as a list, tuple or dict in the first positional argument."),Ee.forEach(t),N.forEach(t),ee=h(b),E=r(b,"P",{});var q=l(E);ze=n(q,`The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `),U=r(q,"CODE",{});var nt=l(U);Ae=n(nt,"model.fit()"),nt.forEach(t),ye=n(q,` things should \u201Cjust work\u201D for you - just pass your inputs and labels in any format that `),L=r(q,"CODE",{});var be=l(L);Ie=n(be,"model.fit()"),be.forEach(t),te=n(q,` supports! 
If, however, you want to use the second format outside of Keras methods like `),oe=r(q,"CODE",{});var at=l(oe);Le=n(at,"fit()"),at.forEach(t),we=n(q," and "),B=r(q,"CODE",{});var rt=l(B);We=n(rt,"predict()"),rt.forEach(t),Te=n(q,`, such as when creating your own layers or models with the Keras `),R=r(q,"CODE",{});var lt=l(R);ge=n(lt,"Functional"),lt.forEach(t),Ne=n(q,` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument:`),q.forEach(t),he=h(b),M=r(b,"UL",{});var O=l(M);z=r(O,"LI",{});var de=l(z);ke=n(de,"a single Tensor with "),W=r(de,"CODE",{});var je=l(W);_e=n(je,"input_values"),je.forEach(t),Be=n(de," only and nothing else: "),se=r(de,"CODE",{});var it=l(se);ne=n(it,"model(input_values)"),it.forEach(t),de.forEach(t),Re=h(O),S=r(O,"LI",{});var ce=l(S);Ve=n(ce,`a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `),ae=r(ce,"CODE",{});var Fe=l(ae);$e=n(Fe,"model([input_values, attention_mask])"),Fe.forEach(t),Q=n(ce," or "),re=r(ce,"CODE",{});var dt=l(re);le=n(dt,"model([input_values, attention_mask, token_type_ids])"),dt.forEach(t),ce.forEach(t),Ue=h(O),K=r(O,"LI",{});var Ge=l(K);Ce=n(Ge,`a dictionary with one or several input Tensors associated to the input names given in the docstring: `),A=r(Ge,"CODE",{});var ct=l(A);Z=n(ct,'model({"input_values": input_values, "token_type_ids": token_type_ids})'),ct.forEach(t),Ge.forEach(t),O.forEach(t),ie=h(b),P=r(b,"P",{});var V=l(P);Ke=n(V,`Note that when creating models and layers with `),I=r(V,"A",{href:!0,rel:!0});var Je=l(I);Ye=n(Je,"subclassing"),Je.forEach(t),He=n(V,` then you don\u2019t need to worry about any of this, as you can just pass inputs like you would to any other Python function!`),V.forEach(t),this.h()},h(){f(I,"href","https://keras.io/guides/making_new_layers_and_models_via_subclassing/"),f(I,"rel","nofollow")},m(b,j){g(b,d,j),e(d,v),e(d,c),e(c,m),e(d,y),g(b,i,j),g(b,p,j),e(p,x),e(x,De),e(p,ve),e(p,D),e(D,J),g(b,ee,j),g(b,E,j),e(E,ze),e(E,U),e(U,Ae),e(E,ye),e(E,L),e(L,Ie),e(E,te),e(E,oe),e(oe,Le),e(E,we),e(E,B),e(B,We),e(E,Te),e(E,R),e(R,ge),e(E,Ne),g(b,he,j),g(b,M,j),e(M,z),e(z,ke),e(z,W),e(W,_e),e(z,Be),e(z,se),e(se,ne),e(M,Re),e(M,S),e(S,Ve),e(S,ae),e(ae,$e),e(S,Q),e(S,re),e(re,le),e(M,Ue),e(M,K),e(K,Ce),e(K,A),e(A,Z),g(b,ie,j),g(b,P,j),e(P,Ke),e(P,I),e(I,Ye),e(P,He)},d(b){b&&t(d),b&&t(i),b&&t(p),b&&t(ee),b&&t(E),b&&t(he),b&&t(M),b&&t(ie),b&&t(P)}}}function Al(F){let d,v,c,m,y;return{c(){d=a("p"),v=s("Although the recipe for forward pass needs to be defined within this function, one should call the "),c=a("code"),m=s("Module"),y=s(` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Although the recipe for forward pass needs to be defined within this function, one should call the "),c=r(p,"CODE",{});var x=l(c);m=n(x,"Module"),x.forEach(t),y=n(p,` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them.`),p.forEach(t)},m(i,p){g(i,d,p),e(d,v),e(d,c),e(c,m),e(d,y)},d(i){i&&t(d)}}}function Il(F){let d,v,c,m,y;return m=new Yt({props:{code:`import tensorflow as tf from transformers import Wav2Vec2Processor, TFHubertForCTC from datasets import load_dataset import soundfile as sf processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") model = 
TFHubertForCTC.from_pretrained("facebook/hubert-base-960h") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.map(map_to_array) input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) transcription = processor.decode(predicted_ids[0]) # compute loss target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" # Pass the transcription as text to encode labels labels = processor(text=transcription, return_tensors="tf").input_values loss = model(input_values, labels=labels).loss`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor, TFHubertForCTC <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> soundfile <span class="hljs-keyword">as</span> sf <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFHubertForCTC.from_pretrained(<span class="hljs-string">&quot;facebook/hubert-base-960h&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">map_to_array</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> speech, _ = sf.read(batch[<span class="hljs-string">&quot;file&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;speech&quot;</span>] = speech <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_dummy&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.<span class="hljs-built_in">map</span>(map_to_array) <span class="hljs-meta">&gt;&gt;&gt; </span>input_values = processor(ds[<span class="hljs-string">&quot;speech&quot;</span>][<span class="hljs-number">0</span>], return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-comment"># Batch size 1</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits = model(input_values).logits <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_ids = tf.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>transcription = processor.decode(predicted_ids[<span class="hljs-number">0</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># compute loss</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_transcription = <span class="hljs-string">&quot;A MAN SAID TO THE UNIVERSE SIR I EXIST&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Pass the transcription as text to encode labels</span> <span class="hljs-meta">&gt;&gt;&gt; </span>labels = processor(text=transcription, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>).input_values <span class="hljs-meta">&gt;&gt;&gt; </span>loss = model(input_values, labels=labels).loss`}}),{c(){d=a("p"),v=s("Example:"),c=u(),w(m.$$.fragment)},l(i){d=r(i,"P",{});var p=l(d);v=n(p,"Example:"),p.forEach(t),c=h(i),T(m.$$.fragment,i)},m(i,p){g(i,d,p),e(d,v),g(i,c,p),k(m,i,p),y=!0},p:Kt,i(i){y||($(m.$$.fragment,i),y=!0)},o(i){C(m.$$.fragment,i),y=!1},d(i){i&&t(d),i&&t(c),H(m,i)}}}function Ll(F){let d,v,c,m,y,i,p,x,De,ve,D,J,ee,E,ze,U,Ae,ye,L,Ie,te,oe,Le,we,B,We,Te,R,ge,Ne,he,M,z,ke,W,_e,Be,se,ne,Re,S,Ve,ae,$e,Q,re,le,Ue,K,Ce,A,Z,ie,P,Ke,I,Ye,He,b,j,ot,N,st,Ee,q,nt,be,at,rt,lt,O,de,je,it,ce,Fe,dt,Ge,ct,V,Je,pt,Tt,ts,Gt,dn,os,cn,qs,pe,Xt,pn,Jt,un,Qt,hn,mn,fn,Zt,gn,zo,_n,bn,vn,eo,yn,to,wn,Tn,kn,xe,oo,$n,ut,Cn,Ao,Hn,En,ss,jn,Fn,xn,kt,qn,$t,Ms,ht,Ct,ns,so,Mn,as,Pn,Ps,ue,no,Sn,mt,On,rs,Dn,zn,ao,An,In,Ln,ro,Wn,Io,Nn,Bn,Rn,lo,Vn,io,Un,Kn,Yn,me,co,Gn,ft,Xn,Lo,Jn,Qn,ls,Zn,ea,ta,Ht,oa,Et,sa,jt,Ss,gt,Ft,is,po,na,ds,aa,Os,Y,uo,ra,cs,la,ia,ho,da,mo,ca,pa,ua,fo,ha,Wo,ma,fa,ga,go,_a,_o,ba,va,ya,fe,bo,wa,_t,Ta,No,ka,$a,ps,Ca,Ha,Ea,xt,ja,qt,Fa,Mt,Ds,bt,Pt,us,vo,xa,hs,qa,zs,G,yo,Ma,ms,Pa,Sa,wo,Oa,Bo,Da,za,Aa,To,Ia,ko,La,Wa,Na,St,Ba,qe,$o,Ra,vt,Va,Ro,Ua,Ka,fs,Ya,Ga,Xa,Ot,Ja,Dt,As,yt,zt,gs,Co,Qa,_s,Za,Is,X,Ho,er,Eo,tr,bs,or,sr,nr,jo,ar,Vo,rr,lr,ir,Fo,dr,xo,cr,pr,ur,At,hr,Me,qo,mr,wt,fr,Uo,gr,_r,vs,br,vr,yr,It,wr,Lt,Ls;return i=new Vt({}),E=new Vt({}),P=new Vt({}),j=new Xe({props:{name:"class transformers.HubertConfig",anchor:"transformers.HubertConfig",parameters:[{name:"vocab_size",val:" = 32"},{name:"hidden_size",val:" = 768"},{name:"num_hidden_layers",val:" = 12"},{name:"num_attention_heads",val:" = 12"},{name:"intermediate_size",val:" = 3072"},{name:"hidden_act",val:" = 'gelu'"},{name:"hidden_dropout",val:" = 0.1"},{name:"activation_dropout",val:" = 0.1"},{name:"attention_dropout",val:" = 0.1"},{name:"feat_proj_layer_norm",val:" = True"},{name:"feat_proj_dropout",val:" = 0.0"},{name:"final_dropout",val:" = 0.1"},{name:"layerdrop",val:" = 
0.1"},{name:"initializer_range",val:" = 0.02"},{name:"layer_norm_eps",val:" = 1e-05"},{name:"feat_extract_norm",val:" = 'group'"},{name:"feat_extract_activation",val:" = 'gelu'"},{name:"conv_dim",val:" = (512, 512, 512, 512, 512, 512, 512)"},{name:"conv_stride",val:" = (5, 2, 2, 2, 2, 2, 2)"},{name:"conv_kernel",val:" = (10, 3, 3, 3, 3, 2, 2)"},{name:"conv_bias",val:" = False"},{name:"num_conv_pos_embeddings",val:" = 128"},{name:"num_conv_pos_embedding_groups",val:" = 16"},{name:"do_stable_layer_norm",val:" = False"},{name:"apply_spec_augment",val:" = True"},{name:"mask_time_prob",val:" = 0.05"},{name:"mask_time_length",val:" = 10"},{name:"mask_time_min_masks",val:" = 2"},{name:"mask_feature_prob",val:" = 0.0"},{name:"mask_feature_length",val:" = 10"},{name:"mask_feature_min_masks",val:" = 0"},{name:"ctc_loss_reduction",val:" = 'sum'"},{name:"ctc_zero_infinity",val:" = False"},{name:"use_weighted_layer_sum",val:" = False"},{name:"classifier_proj_size",val:" = 256"},{name:"pad_token_id",val:" = 0"},{name:"bos_token_id",val:" = 1"},{name:"eos_token_id",val:" = 2"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.HubertConfig.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the <code>inputs_ids</code> passed when calling <a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertModel">HubertModel</a>. Vocabulary size of the model. Defines the different tokens that can be represented by the <em>inputs_ids</em> passed to the forward method of <a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertModel">HubertModel</a>.`,name:"vocab_size"},{anchor:"transformers.HubertConfig.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>, <em>optional</em>, defaults to 768) &#x2014; Dimensionality of the encoder layers and the pooler layer.`,name:"hidden_size"},{anchor:"transformers.HubertConfig.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of hidden layers in the Transformer encoder.`,name:"num_hidden_layers"},{anchor:"transformers.HubertConfig.num_attention_heads",description:`<strong>num_attention_heads</strong> (<code>int</code>, <em>optional</em>, defaults to 12) &#x2014; Number of attention heads for each attention layer in the Transformer encoder.`,name:"num_attention_heads"},{anchor:"transformers.HubertConfig.intermediate_size",description:`<strong>intermediate_size</strong> (<code>int</code>, <em>optional</em>, defaults to 3072) &#x2014; Dimensionality of the &#x201C;intermediate&#x201D; (i.e., feed-forward) layer in the Transformer encoder.`,name:"intermediate_size"},{anchor:"transformers.HubertConfig.hidden_act",description:`<strong>hidden_act</strong> (<code>str</code> or <code>function</code>, <em>optional</em>, defaults to <code>&quot;gelu&quot;</code>) &#x2014; The non-linear activation function (function or string) in the encoder and pooler. 
If string, <code>&quot;gelu&quot;</code>, <code>&quot;relu&quot;</code>, <code>&quot;selu&quot;</code> and <code>&quot;gelu_new&quot;</code> are supported.`,name:"hidden_act"},{anchor:"transformers.HubertConfig.hidden_dropout(float,",description:`<strong>hidden_dropout(<code>float</code>,</strong> <em>optional</em>, defaults to 0.1) &#x2014; The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.`,name:"hidden_dropout(float,"},{anchor:"transformers.HubertConfig.attention_dropout(float,",description:`<strong>attention_dropout(<code>float</code>,</strong> <em>optional</em>, defaults to 0.1) &#x2014; The dropout ratio for the attention probabilities.`,name:"attention_dropout(float,"},{anchor:"transformers.HubertConfig.final_dropout",description:`<strong>final_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The dropout probabilitiy for the final projection layer of <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.`,name:"final_dropout"},{anchor:"transformers.HubertConfig.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation of the truncated_normal_initializer for initializing all weight matrices.`,name:"initializer_range"},{anchor:"transformers.HubertConfig.layer_norm_eps",description:`<strong>layer_norm_eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-12) &#x2014; The epsilon used by the layer normalization layers.`,name:"layer_norm_eps"},{anchor:"transformers.HubertConfig.feat_extract_norm",description:`<strong>feat_extract_norm</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;group&quot;</code>) &#x2014; The norm to be applied to 1D convolutional layers in feature encoder. One of <code>&quot;group&quot;</code> for group normalization of only the first 1D convolutional layer or <code>&quot;layer&quot;</code> for layer normalization of all 1D convolutional layers.`,name:"feat_extract_norm"},{anchor:"transformers.HubertConfig.feat_proj_dropout",description:`<strong>feat_proj_dropout</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The dropout probability for output of the feature encoder.`,name:"feat_proj_dropout"},{anchor:"transformers.HubertConfig.feat_proj_layer_norm",description:`<strong>feat_proj_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply LayerNorm to the output of the feature encoder.`,name:"feat_proj_layer_norm"},{anchor:"transformers.HubertConfig.feat_extract_activation",description:"<strong>feat_extract_activation</strong> (<code>str, </code>optional<code>, defaults to </code>&#x201C;gelu&#x201D;<code>) -- The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, </code>&#x201C;gelu&#x201D;<code>, </code>&#x201C;relu&#x201D;<code>, </code>&#x201C;selu&#x201D;<code>and</code>&#x201C;gelu_new&#x201D;` are supported.",name:"feat_extract_activation"},{anchor:"transformers.HubertConfig.conv_dim",description:`<strong>conv_dim</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(512, 512, 512, 512, 512, 512, 512)</code>) &#x2014; A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. 
The length of <em>conv_dim</em> defines the number of 1D convolutional layers.`,name:"conv_dim"},{anchor:"transformers.HubertConfig.conv_stride",description:`<strong>conv_stride</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(5, 2, 2, 2, 2, 2, 2)</code>) &#x2014; A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of <em>conv_stride</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_stride"},{anchor:"transformers.HubertConfig.conv_kernel",description:`<strong>conv_kernel</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to <code>(10, 3, 3, 3, 3, 3, 3)</code>) &#x2014; A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of <em>conv_kernel</em> defines the number of convolutional layers and has to match the length of <em>conv_dim</em>.`,name:"conv_kernel"},{anchor:"transformers.HubertConfig.conv_bias",description:`<strong>conv_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the 1D convolutional layers have a bias.`,name:"conv_bias"},{anchor:"transformers.HubertConfig.num_conv_pos_embeddings",description:`<strong>num_conv_pos_embeddings</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embeddings"},{anchor:"transformers.HubertConfig.num_conv_pos_embedding_groups",description:`<strong>num_conv_pos_embedding_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 16) &#x2014; Number of groups of 1D convolutional positional embeddings layer.`,name:"num_conv_pos_embedding_groups"},{anchor:"transformers.HubertConfig.do_stable_layer_norm",description:`<strong>do_stable_layer_norm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether do apply <em>stable</em> layer norm architecture of the Transformer encoder. <code>do_stable_layer_norm is True</code> corresponds to applying layer norm before the attention layer, whereas <code>do_stable_layer_norm is False</code> corresponds to applying layer norm after the attention layer.`,name:"do_stable_layer_norm"},{anchor:"transformers.HubertConfig.apply_spec_augment",description:`<strong>apply_spec_augment</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to apply <em>SpecAugment</em> data augmentation to the outputs of the feature encoder. For reference see <a href="https://arxiv.org/abs/1904.08779" rel="nofollow">SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition</a>.`,name:"apply_spec_augment"},{anchor:"transformers.HubertConfig.mask_time_prob",description:`<strong>mask_time_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.05) &#x2014; Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates &#x201D;mask_time_prob<em>len(time_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_time_prob<em> should be \`prob_vector_start</em>mask_time_length<code>. Note that overlap may decrease the actual percentage of masked vectors. 
This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_time_prob"},{anchor:"transformers.HubertConfig.mask_time_length",description:`<strong>mask_time_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the time axis.`,name:"mask_time_length"},{anchor:"transformers.HubertConfig.mask_time_min_masks",description:`<strong>mask_time_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 2), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the time axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_time_prob*len(time_axis)/mask_time_length &lt; mask_time_min_masks&#x201D;`,name:"mask_time_min_masks"},{anchor:"transformers.HubertConfig.mask_feature_prob",description:`<strong>mask_feature_prob</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates &#x201D;mask_feature_prob<em>len(feature_axis)/mask_time_length&#x201D; independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, </em>mask_feature_prob<em> should be \`prob_vector_start</em>mask_feature_length<code>. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if </code>apply_spec_augment is True\`.`,name:"mask_feature_prob"},{anchor:"transformers.HubertConfig.mask_feature_length",description:`<strong>mask_feature_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Length of vector span along the feature axis.`,name:"mask_feature_length"},{anchor:"transformers.HubertConfig.mask_feature_min_masks",description:`<strong>mask_feature_min_masks</strong> (<code>int</code>, <em>optional</em>, defaults to 0), &#x2014; The minimum number of masks of length <code>mask_feature_length</code> generated along the feature axis, each time step, irrespectively of <code>mask_feature_prob</code>. Only relevant if &#x201D;mask_feature_prob*len(feature_axis)/mask_feature_length &lt; mask_feature_min_masks&#x201D;`,name:"mask_feature_min_masks"},{anchor:"transformers.HubertConfig.ctc_loss_reduction",description:`<strong>ctc_loss_reduction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;sum&quot;</code>) &#x2014; Specifies the reduction to apply to the output of <code>torch.nn.CTCLoss</code>. Only relevant when training an instance of <a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertForCTC">HubertForCTC</a>.`,name:"ctc_loss_reduction"},{anchor:"transformers.HubertConfig.ctc_zero_infinity",description:`<strong>ctc_zero_infinity</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to zero infinite losses and the associated gradients of <code>torch.nn.CTCLoss</code>. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of <a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertForCTC">HubertForCTC</a>.`,name:"ctc_zero_infinity"},{anchor:"transformers.HubertConfig.use_weighted_layer_sum",description:`<strong>use_weighted_layer_sum</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a weighted average of layer outputs with learned weights. 
Only relevant when using an instance of <a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertForSequenceClassification">HubertForSequenceClassification</a>.`,name:"use_weighted_layer_sum"},{anchor:"transformers.HubertConfig.classifier_proj_size",description:`<strong>classifier_proj_size</strong> (<code>int</code>, <em>optional</em>, defaults to 256) &#x2014; Dimensionality of the projection before token mean-pooling for classification.`,name:"classifier_proj_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/configuration_hubert.py#L32"}}),V=new Ut({props:{anchor:"transformers.HubertConfig.example",$$slots:{default:[Cl]},$$scope:{ctx:F}}}),Gt=new Vt({}),Xt=new Xe({props:{name:"class transformers.HubertModel",anchor:"transformers.HubertModel",parameters:[{name:"config",val:": HubertConfig"}],parametersDescription:[{anchor:"transformers.HubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L954"}}),oo=new Xe({props:{name:"forward",anchor:"transformers.HubertModel.forward",parameters:[{name:"input_values",val:": typing.Optional[torch.Tensor]"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"mask_time_indices",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"transformers.HubertModel.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertModel.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertModel.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertModel.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertModel.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L1019",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.BaseModelOutput" >transformers.modeling_outputs.BaseModelOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),kt=new Do({props:{$$slots:{default:[Hl]},$$scope:{ctx:F}}}),$t=new Ut({props:{anchor:"transformers.HubertModel.forward.example",$$slots:{default:[El]},$$scope:{ctx:F}}}),so=new Vt({}),no=new Xe({props:{name:"class transformers.HubertForCTC",anchor:"transformers.HubertForCTC",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.HubertForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L1098"}}),co=new Xe({props:{name:"forward",anchor:"transformers.HubertForCTC.forward",parameters:[{name:"input_values",val:": typing.Optional[torch.Tensor]"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"}],parametersDescription:[{anchor:"transformers.HubertForCTC.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertForCTC.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertForCTC.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertForCTC.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertForCTC.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.HubertForCTC.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, target_length)</code>, <em>optional</em>) &#x2014; Labels for connectionist temporal classification. Note that <code>target_length</code> has to be smaller or equal to the sequence length of the output logits. Indices are selected in <code>[-100, 0, ..., config.vocab_size - 1]</code>. All labels set to <code>-100</code> are ignored (masked), the loss is only computed for labels in <code>[0, ..., config.vocab_size - 1]</code>.`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L1139",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a 
href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.CausalLMOutput" >transformers.modeling_outputs.CausalLMOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),Ht=new Do({props:{$$slots:{default:[jl]},$$scope:{ctx:F}}}),Et=new Ut({props:{anchor:"transformers.HubertForCTC.forward.example",$$slots:{default:[Fl]},$$scope:{ctx:F}}}),jt=new Ut({props:{anchor:"transformers.HubertForCTC.forward.example-2",$$slots:{default:[xl]},$$scope:{ctx:F}}}),po=new Vt({}),uo=new Xe({props:{name:"class transformers.HubertForSequenceClassification",anchor:"transformers.HubertForSequenceClassification",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.HubertForSequenceClassification.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L1229"}}),bo=new Xe({props:{name:"forward",anchor:"transformers.HubertForSequenceClassification.forward",parameters:[{name:"input_values",val:": typing.Optional[torch.Tensor]"},{name:"attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[torch.Tensor] = None"}],parametersDescription:[{anchor:"transformers.HubertForSequenceClassification.forward.input_values",description:`<strong>input_values</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Float values of input raw speech waveform. Values can be obtained by loading a <em>.flac</em> or <em>.wav</em> audio file into an array of type <em>List[float]</em> or a <em>numpy.ndarray</em>, <em>e.g.</em> via the soundfile library (<em>pip install soundfile</em>). To prepare the array into <em>input_values</em>, the <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor">Wav2Vec2Processor</a> should be used for padding and conversion into a tensor of type <em>torch.FloatTensor</em>. See <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2Processor.__call__">Wav2Vec2Processor.<strong>call</strong>()</a> for details.`,name:"input_values"},{anchor:"transformers.HubertForSequenceClassification.forward.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><code>attention_mask</code> should only be passed if the corresponding processor has <code>config.return_attention_mask == True</code>. For all models whose processor has <code>config.return_attention_mask == False</code>, such as <a href="https://huggingface.co/facebook/hubert-base-ls960" rel="nofollow">hubert-base</a>, <code>attention_mask</code> should <strong>not</strong> be passed to avoid degraded performance when doing batched inference. For such models <code>input_values</code> should simply be padded with 0 and passed without <code>attention_mask</code>. Be aware that these models also yield slightly different results depending on whether <code>input_values</code> is padded or not.</p> </div>`,name:"attention_mask"},{anchor:"transformers.HubertForSequenceClassification.forward.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail.`,name:"output_attentions"},{anchor:"transformers.HubertForSequenceClassification.forward.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail.`,name:"output_hidden_states"},{anchor:"transformers.HubertForSequenceClassification.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.HubertForSequenceClassification.forward.labels",description:`<strong>labels</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Labels for computing the sequence classification/regression loss. Indices should be in <code>[0, ..., config.num_labels - 1]</code>. 
If <code>config.num_labels == 1</code> a regression loss is computed (Mean-Square loss), If <code>config.num_labels &gt; 1</code> a classification loss is computed (Cross-Entropy).`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_hubert.py#L1274",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) \u2014 Classification (or regression if config.num_labels==1) loss.</p> </li> <li> <p><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) \u2014 Classification (or regression if config.num_labels==1) scores (before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput" >transformers.modeling_outputs.SequenceClassifierOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),xt=new Do({props:{$$slots:{default:[ql]},$$scope:{ctx:F}}}),qt=new Ut({props:{anchor:"transformers.HubertForSequenceClassification.forward.example",$$slots:{default:[Ml]},$$scope:{ctx:F}}}),Mt=new Ut({props:{anchor:"transformers.HubertForSequenceClassification.forward.example-2",$$slots:{default:[Pl]},$$scope:{ctx:F}}}),vo=new Vt({}),yo=new Xe({props:{name:"class transformers.TFHubertModel",anchor:"transformers.TFHubertModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFHubertModel.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_tf_hubert.py#L1428"}}),St=new Do({props:{$$slots:{default:[Sl]},$$scope:{ctx:F}}}),$o=new Xe({props:{name:"call",anchor:"transformers.TFHubertModel.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": bool = False"}],parametersDescription:[{anchor:"transformers.TFHubertModel.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFHubertModel.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFHubertModel.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFHubertModel.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFHubertModel.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFHubertModel.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFHubertModel.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFHubertModel.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFHubertModel.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFHubertModel.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_tf_hubert.py#L1434",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) \u2014 Sequence of hidden-states at the output of the last layer of the model.</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFBaseModelOutput" >transformers.modeling_tf_outputs.TFBaseModelOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),Ot=new Do({props:{$$slots:{default:[Ol]},$$scope:{ctx:F}}}),Dt=new Ut({props:{anchor:"transformers.TFHubertModel.call.example",$$slots:{default:[Dl]},$$scope:{ctx:F}}}),Co=new Vt({}),Ho=new Xe({props:{name:"class transformers.TFHubertForCTC",anchor:"transformers.TFHubertForCTC",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFHubertForCTC.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig">HubertConfig</a>) &#x2014; Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> method to load the model weights.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_tf_hubert.py#L1527"}}),At=new Do({props:{$$slots:{default:[zl]},$$scope:{ctx:F}}}),qo=new Xe({props:{name:"call",anchor:"transformers.TFHubertForCTC.call",parameters:[{name:"input_values",val:": Tensor"},{name:"attention_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"token_type_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"position_ids",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"head_mask",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"inputs_embeds",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"labels",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"return_dict",val:": typing.Optional[bool] = None"},{name:"training",val:": typing.Optional[bool] = False"}],parametersDescription:[{anchor:"transformers.TFHubertForCTC.call.input_values",description:`<strong>input_values</strong> (<code>np.ndarray</code>, <code>tf.Tensor</code>, <code>List[tf.Tensor]</code> \`<code>Dict[str, tf.Tensor]</code> or <code>Dict[str, np.ndarray]</code> and each example must have the shape <code>({0})</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_values"},{anchor:"transformers.TFHubertForCTC.call.attention_mask",description:`<strong>attention_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 for tokens that are <strong>not masked</strong>,</li> <li>0 for tokens that are <strong>masked</strong>.</li> </ul> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.TFHubertForCTC.call.token_type_ids",description:`<strong>token_type_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in <code>[0, 1]</code>:</p> <ul> <li>0 corresponds to a <em>sentence A</em> token,</li> <li>1 corresponds to a <em>sentence B</em> token.</li> </ul> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"token_type_ids"},{anchor:"transformers.TFHubertForCTC.call.position_ids",description:`<strong>position_ids</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0})</code>, <em>optional</em>) &#x2014; Indices of positions of each input sequence tokens in the position embeddings. Selected in the range <code>[0, config.max_position_embeddings - 1]</code>.</p> <p><a href="../glossary#position-ids">What are position IDs?</a>`,name:"position_ids"},{anchor:"transformers.TFHubertForCTC.call.head_mask",description:`<strong>head_mask</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>(num_heads,)</code> or <code>(num_layers, num_heads)</code>, <em>optional</em>) &#x2014; Mask to nullify selected heads of the self-attention modules. Mask values selected in <code>[0, 1]</code>:</p> <ul> <li>1 indicates the head is <strong>not masked</strong>,</li> <li>0 indicates the head is <strong>masked</strong>.</li> </ul>`,name:"head_mask"},{anchor:"transformers.TFHubertForCTC.call.inputs_embeds",description:`<strong>inputs_embeds</strong> (<code>np.ndarray</code> or <code>tf.Tensor</code> of shape <code>({0}, hidden_size)</code>, <em>optional</em>) &#x2014; Optionally, instead of passing <code>input_values</code> you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert <code>input_values</code> indices into associated vectors than the model&#x2019;s internal embedding lookup matrix.`,name:"inputs_embeds"},{anchor:"transformers.TFHubertForCTC.call.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_attentions"},{anchor:"transformers.TFHubertForCTC.call.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead.`,name:"output_hidden_states"},{anchor:"transformers.TFHubertForCTC.call.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True.`,name:"return_dict"},{anchor:"transformers.TFHubertForCTC.call.training",description:`<strong>training</strong> (<code>bool</code>, <em>optional</em>, defaults to \`False&#x201C;) &#x2014; Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).`,name:"training"},{anchor:"transformers.TFHubertForCTC.call.labels",description:`<strong>labels</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Labels for computing the masked language modeling loss. Indices should be in <code>[-100, 0, ..., config.vocab_size]</code> (see <code>input_values</code> docstring) Tokens with indices set to <code>-100</code> are ignored (masked), the loss is only computed for the tokens with labels in <code>[0, ..., config.vocab_size]</code>`,name:"labels"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/hubert/modeling_tf_hubert.py#L1554",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or a tuple of <code>tf.Tensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<a href="/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertConfig" >HubertConfig</a>) and inputs.</p> <ul> <li> <p><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) \u2014 Language modeling loss (for next-token prediction).</p> </li> <li> <p><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) \u2014 Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</p> </li> <li> <p><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.</p> </li> <li> <p><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) \u2014 Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_tf_outputs.TFCausalLMOutput" >transformers.modeling_tf_outputs.TFCausalLMOutput</a> or <code>tuple(tf.Tensor)</code></p> `}}),It=new Do({props:{$$slots:{default:[Al]},$$scope:{ctx:F}}}),Lt=new 
Ut({props:{anchor:"transformers.TFHubertForCTC.call.example",$$slots:{default:[Il]},$$scope:{ctx:F}}}),{c(){d=a("meta"),v=u(),c=a("h1"),m=a("a"),y=a("span"),w(i.$$.fragment),p=u(),x=a("span"),De=s("Hubert"),ve=u(),D=a("h2"),J=a("a"),ee=a("span"),w(E.$$.fragment),ze=u(),U=a("span"),Ae=s("Overview"),ye=u(),L=a("p"),Ie=s("Hubert was proposed in "),te=a("a"),oe=s("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),Le=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),we=u(),B=a("p"),We=s("The abstract from the paper is the following:"),Te=u(),R=a("p"),ge=a("em"),Ne=s(`Self-supervised approaches for speech representation learning are challenged by three unique problems: (1) there are multiple sound units in each input utterance, (2) there is no lexicon of input sound units during the pre-training phase, and (3) sound units have variable lengths with no explicit segmentation. To deal with these three problems, we propose the Hidden-Unit BERT (HuBERT) approach for self-supervised speech representation learning, which utilizes an offline clustering step to provide aligned target labels for a BERT-like prediction loss. A key ingredient of our approach is applying the prediction loss over the masked regions only, which forces the model to learn a combined acoustic and language model over the continuous inputs. HuBERT relies primarily on the consistency of the unsupervised clustering step rather than the intrinsic quality of the assigned cluster labels. Starting with a simple k-means teacher of 100 clusters, and using two iterations of clustering, the HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets. Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.`),he=u(),M=a("p"),z=s("Tips:"),ke=u(),W=a("ul"),_e=a("li"),Be=s("Hubert is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),se=u(),ne=a("li"),Re=s(`Hubert model was fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),S=a("a"),Ve=s("Wav2Vec2CTCTokenizer"),ae=s("."),$e=u(),Q=a("p"),re=s("This model was contributed by "),le=a("a"),Ue=s("patrickvonplaten"),K=s("."),Ce=u(),A=a("h2"),Z=a("a"),ie=a("span"),w(P.$$.fragment),Ke=u(),I=a("span"),Ye=s("HubertConfig"),He=u(),b=a("div"),w(j.$$.fragment),ot=u(),N=a("p"),st=s("This is the configuration class to store the configuration of a "),Ee=a("a"),q=s("HubertModel"),nt=s(`. It is used to instantiate an Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Hubert `),be=a("a"),at=s("facebook/hubert-base-ls960"),rt=s(" architecture."),lt=u(),O=a("p"),de=s("Configuration objects inherit from "),je=a("a"),it=s("PretrainedConfig"),ce=s(` and can be used to control the model outputs. 
Read the documentation from `),Fe=a("a"),dt=s("PretrainedConfig"),Ge=s(" for more information."),ct=u(),w(V.$$.fragment),Je=u(),pt=a("h2"),Tt=a("a"),ts=a("span"),w(Gt.$$.fragment),dn=u(),os=a("span"),cn=s("HubertModel"),qs=u(),pe=a("div"),w(Xt.$$.fragment),pn=u(),Jt=a("p"),un=s(`The bare Hubert Model transformer outputting raw hidden-states without any specific head on top. Hubert was proposed in `),Qt=a("a"),hn=s(`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),mn=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),fn=u(),Zt=a("p"),gn=s("This model inherits from "),zo=a("a"),_n=s("PreTrainedModel"),bn=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),vn=u(),eo=a("p"),yn=s("This model is a PyTorch "),to=a("a"),wn=s("torch.nn.Module"),Tn=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),kn=u(),xe=a("div"),w(oo.$$.fragment),$n=u(),ut=a("p"),Cn=s("The "),Ao=a("a"),Hn=s("HubertModel"),En=s(" forward method, overrides the "),ss=a("code"),jn=s("__call__"),Fn=s(" special method."),xn=u(),w(kt.$$.fragment),qn=u(),w($t.$$.fragment),Ms=u(),ht=a("h2"),Ct=a("a"),ns=a("span"),w(so.$$.fragment),Mn=u(),as=a("span"),Pn=s("HubertForCTC"),Ps=u(),ue=a("div"),w(no.$$.fragment),Sn=u(),mt=a("p"),On=s("Hubert Model with a "),rs=a("code"),Dn=s("language modeling"),zn=s(` head on top for Connectionist Temporal Classification (CTC). Hubert was proposed in `),ao=a("a"),An=s(`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),In=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),Ln=u(),ro=a("p"),Wn=s("This model inherits from "),Io=a("a"),Nn=s("PreTrainedModel"),Bn=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Rn=u(),lo=a("p"),Vn=s("This model is a PyTorch "),io=a("a"),Un=s("torch.nn.Module"),Kn=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Yn=u(),me=a("div"),w(co.$$.fragment),Gn=u(),ft=a("p"),Xn=s("The "),Lo=a("a"),Jn=s("HubertForCTC"),Qn=s(" forward method, overrides the "),ls=a("code"),Zn=s("__call__"),ea=s(" special method."),ta=u(),w(Ht.$$.fragment),oa=u(),w(Et.$$.fragment),sa=u(),w(jt.$$.fragment),Ss=u(),gt=a("h2"),Ft=a("a"),is=a("span"),w(po.$$.fragment),na=u(),ds=a("span"),aa=s("HubertForSequenceClassification"),Os=u(),Y=a("div"),w(uo.$$.fragment),ra=u(),cs=a("p"),la=s(`Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),ia=u(),ho=a("p"),da=s("Hubert was proposed in "),mo=a("a"),ca=s(`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),pa=s(` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),ua=u(),fo=a("p"),ha=s("This model inherits from "),Wo=a("a"),ma=s("PreTrainedModel"),fa=s(`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),ga=u(),go=a("p"),_a=s("This model is a PyTorch "),_o=a("a"),ba=s("torch.nn.Module"),va=s(` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),ya=u(),fe=a("div"),w(bo.$$.fragment),wa=u(),_t=a("p"),Ta=s("The "),No=a("a"),ka=s("HubertForSequenceClassification"),$a=s(" forward method, overrides the "),ps=a("code"),Ca=s("__call__"),Ha=s(" special method."),Ea=u(),w(xt.$$.fragment),ja=u(),w(qt.$$.fragment),Fa=u(),w(Mt.$$.fragment),Ds=u(),bt=a("h2"),Pt=a("a"),us=a("span"),w(vo.$$.fragment),xa=u(),hs=a("span"),qa=s("TFHubertModel"),zs=u(),G=a("div"),w(yo.$$.fragment),Ma=u(),ms=a("p"),Pa=s("The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top."),Sa=u(),wo=a("p"),Oa=s("This model inherits from "),Bo=a("a"),Da=s("TFPreTrainedModel"),za=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),Aa=u(),To=a("p"),Ia=s("This model is also a "),ko=a("a"),La=s("tf.keras.Model"),Wa=s(` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),Na=u(),w(St.$$.fragment),Ba=u(),qe=a("div"),w($o.$$.fragment),Ra=u(),vt=a("p"),Va=s("The "),Ro=a("a"),Ua=s("TFHubertModel"),Ka=s(" forward method, overrides the "),fs=a("code"),Ya=s("__call__"),Ga=s(" special method."),Xa=u(),w(Ot.$$.fragment),Ja=u(),w(Dt.$$.fragment),As=u(),yt=a("h2"),zt=a("a"),gs=a("span"),w(Co.$$.fragment),Qa=u(),_s=a("span"),Za=s("TFHubertForCTC"),Is=u(),X=a("div"),w(Ho.$$.fragment),er=u(),Eo=a("p"),tr=s("TFHubert Model with a "),bs=a("code"),or=s("language modeling"),sr=s(" head on top for Connectionist Temporal Classification (CTC)."),nr=u(),jo=a("p"),ar=s("This model inherits from "),Vo=a("a"),rr=s("TFPreTrainedModel"),lr=s(`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),ir=u(),Fo=a("p"),dr=s("This model is also a "),xo=a("a"),cr=s("tf.keras.Model"),pr=s(` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ur=u(),w(At.$$.fragment),hr=u(),Me=a("div"),w(qo.$$.fragment),mr=u(),wt=a("p"),fr=s("The "),Uo=a("a"),gr=s("TFHubertForCTC"),_r=s(" forward method, overrides the "),vs=a("code"),br=s("__call__"),vr=s(" special method."),yr=u(),w(It.$$.fragment),wr=u(),w(Lt.$$.fragment),this.h()},l(o){const _=kl('[data-svelte="svelte-1phssyn"]',document.head);d=r(_,"META",{name:!0,content:!0}),_.forEach(t),v=h(o),c=r(o,"H1",{class:!0});var Mo=l(c);m=r(Mo,"A",{id:!0,class:!0,href:!0});var ys=l(m);y=r(ys,"SPAN",{});var ws=l(y);T(i.$$.fragment,ws),ws.forEach(t),ys.forEach(t),p=h(Mo),x=r(Mo,"SPAN",{});var Ts=l(x);De=n(Ts,"Hubert"),Ts.forEach(t),Mo.forEach(t),ve=h(o),D=r(o,"H2",{class:!0});var Po=l(D);J=r(Po,"A",{id:!0,class:!0,href:!0});var ks=l(J);ee=r(ks,"SPAN",{});var $s=l(ee);T(E.$$.fragment,$s),$s.forEach(t),ks.forEach(t),ze=h(Po),U=r(Po,"SPAN",{});var Cs=l(U);Ae=n(Cs,"Overview"),Cs.forEach(t),Po.forEach(t),ye=h(o),L=r(o,"P",{});var So=l(L);Ie=n(So,"Hubert was proposed in "),te=r(So,"A",{href:!0,rel:!0});var Hs=l(te);oe=n(Hs,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),Hs.forEach(t),Le=n(So,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),So.forEach(t),we=h(o),B=r(o,"P",{});var Es=l(B);We=n(Es,"The abstract from the paper is the following:"),Es.forEach(t),Te=h(o),R=r(o,"P",{});var js=l(R);ge=r(js,"EM",{});var Fs=l(ge);Ne=n(Fs,`Self-supervised approaches for speech representation learning are challenged by three unique problems: (1) there are multiple sound units in each input utterance, (2) there is no lexicon of input sound units during the pre-training phase, and (3) sound units have variable lengths with no explicit segmentation. To deal with these three problems, we propose the Hidden-Unit BERT (HuBERT) approach for self-supervised speech representation learning, which utilizes an offline clustering step to provide aligned target labels for a BERT-like prediction loss. A key ingredient of our approach is applying the prediction loss over the masked regions only, which forces the model to learn a combined acoustic and language model over the continuous inputs. HuBERT relies primarily on the consistency of the unsupervised clustering step rather than the intrinsic quality of the assigned cluster labels. Starting with a simple k-means teacher of 100 clusters, and using two iterations of clustering, the HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets. 
Using a 1B parameter model, HuBERT shows up to 19% and 13% relative WER reduction on the more challenging dev-other and test-other evaluation subsets.`),Fs.forEach(t),js.forEach(t),he=h(o),M=r(o,"P",{});var xs=l(M);z=n(xs,"Tips:"),xs.forEach(t),ke=h(o),W=r(o,"UL",{});var Oo=l(W);_e=r(Oo,"LI",{});var Tr=l(_e);Be=n(Tr,"Hubert is a speech model that accepts a float array corresponding to the raw waveform of the speech signal."),Tr.forEach(t),se=h(Oo),ne=r(Oo,"LI",{});var Ws=l(ne);Re=n(Ws,`Hubert model was fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using `),S=r(Ws,"A",{href:!0});var kr=l(S);Ve=n(kr,"Wav2Vec2CTCTokenizer"),kr.forEach(t),ae=n(Ws,"."),Ws.forEach(t),Oo.forEach(t),$e=h(o),Q=r(o,"P",{});var Ns=l(Q);re=n(Ns,"This model was contributed by "),le=r(Ns,"A",{href:!0,rel:!0});var $r=l(le);Ue=n($r,"patrickvonplaten"),$r.forEach(t),K=n(Ns,"."),Ns.forEach(t),Ce=h(o),A=r(o,"H2",{class:!0});var Bs=l(A);Z=r(Bs,"A",{id:!0,class:!0,href:!0});var Cr=l(Z);ie=r(Cr,"SPAN",{});var Hr=l(ie);T(P.$$.fragment,Hr),Hr.forEach(t),Cr.forEach(t),Ke=h(Bs),I=r(Bs,"SPAN",{});var Er=l(I);Ye=n(Er,"HubertConfig"),Er.forEach(t),Bs.forEach(t),He=h(o),b=r(o,"DIV",{class:!0});var Wt=l(b);T(j.$$.fragment,Wt),ot=h(Wt),N=r(Wt,"P",{});var Ko=l(N);st=n(Ko,"This is the configuration class to store the configuration of a "),Ee=r(Ko,"A",{href:!0});var jr=l(Ee);q=n(jr,"HubertModel"),jr.forEach(t),nt=n(Ko,`. It is used to instantiate an Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Hubert `),be=r(Ko,"A",{href:!0,rel:!0});var Fr=l(be);at=n(Fr,"facebook/hubert-base-ls960"),Fr.forEach(t),rt=n(Ko," architecture."),Ko.forEach(t),lt=h(Wt),O=r(Wt,"P",{});var Yo=l(O);de=n(Yo,"Configuration objects inherit from "),je=r(Yo,"A",{href:!0});var xr=l(je);it=n(xr,"PretrainedConfig"),xr.forEach(t),ce=n(Yo,` and can be used to control the model outputs. Read the documentation from `),Fe=r(Yo,"A",{href:!0});var qr=l(Fe);dt=n(qr,"PretrainedConfig"),qr.forEach(t),Ge=n(Yo," for more information."),Yo.forEach(t),ct=h(Wt),T(V.$$.fragment,Wt),Wt.forEach(t),Je=h(o),pt=r(o,"H2",{class:!0});var Rs=l(pt);Tt=r(Rs,"A",{id:!0,class:!0,href:!0});var Mr=l(Tt);ts=r(Mr,"SPAN",{});var Pr=l(ts);T(Gt.$$.fragment,Pr),Pr.forEach(t),Mr.forEach(t),dn=h(Rs),os=r(Rs,"SPAN",{});var Sr=l(os);cn=n(Sr,"HubertModel"),Sr.forEach(t),Rs.forEach(t),qs=h(o),pe=r(o,"DIV",{class:!0});var Qe=l(pe);T(Xt.$$.fragment,Qe),pn=h(Qe),Jt=r(Qe,"P",{});var Vs=l(Jt);un=n(Vs,`The bare Hubert Model transformer outputting raw hidden-states without any specific head on top. Hubert was proposed in `),Qt=r(Vs,"A",{href:!0,rel:!0});var Or=l(Qt);hn=n(Or,`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),Or.forEach(t),mn=n(Vs,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),Vs.forEach(t),fn=h(Qe),Zt=r(Qe,"P",{});var Us=l(Zt);gn=n(Us,"This model inherits from "),zo=r(Us,"A",{href:!0});var Dr=l(zo);_n=n(Dr,"PreTrainedModel"),Dr.forEach(t),bn=n(Us,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Us.forEach(t),vn=h(Qe),eo=r(Qe,"P",{});var Ks=l(eo);yn=n(Ks,"This model is a PyTorch "),to=r(Ks,"A",{href:!0,rel:!0});var zr=l(to);wn=n(zr,"torch.nn.Module"),zr.forEach(t),Tn=n(Ks,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Ks.forEach(t),kn=h(Qe),xe=r(Qe,"DIV",{class:!0});var Nt=l(xe);T(oo.$$.fragment,Nt),$n=h(Nt),ut=r(Nt,"P",{});var Go=l(ut);Cn=n(Go,"The "),Ao=r(Go,"A",{href:!0});var Ar=l(Ao);Hn=n(Ar,"HubertModel"),Ar.forEach(t),En=n(Go," forward method, overrides the "),ss=r(Go,"CODE",{});var Ir=l(ss);jn=n(Ir,"__call__"),Ir.forEach(t),Fn=n(Go," special method."),Go.forEach(t),xn=h(Nt),T(kt.$$.fragment,Nt),qn=h(Nt),T($t.$$.fragment,Nt),Nt.forEach(t),Qe.forEach(t),Ms=h(o),ht=r(o,"H2",{class:!0});var Ys=l(ht);Ct=r(Ys,"A",{id:!0,class:!0,href:!0});var Lr=l(Ct);ns=r(Lr,"SPAN",{});var Wr=l(ns);T(so.$$.fragment,Wr),Wr.forEach(t),Lr.forEach(t),Mn=h(Ys),as=r(Ys,"SPAN",{});var Nr=l(as);Pn=n(Nr,"HubertForCTC"),Nr.forEach(t),Ys.forEach(t),Ps=h(o),ue=r(o,"DIV",{class:!0});var Ze=l(ue);T(no.$$.fragment,Ze),Sn=h(Ze),mt=r(Ze,"P",{});var Xo=l(mt);On=n(Xo,"Hubert Model with a "),rs=r(Xo,"CODE",{});var Br=l(rs);Dn=n(Br,"language modeling"),Br.forEach(t),zn=n(Xo,` head on top for Connectionist Temporal Classification (CTC). Hubert was proposed in `),ao=r(Xo,"A",{href:!0,rel:!0});var Rr=l(ao);An=n(Rr,`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),Rr.forEach(t),In=n(Xo,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),Xo.forEach(t),Ln=h(Ze),ro=r(Ze,"P",{});var Gs=l(ro);Wn=n(Gs,"This model inherits from "),Io=r(Gs,"A",{href:!0});var Vr=l(Io);Nn=n(Vr,"PreTrainedModel"),Vr.forEach(t),Bn=n(Gs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Gs.forEach(t),Rn=h(Ze),lo=r(Ze,"P",{});var Xs=l(lo);Vn=n(Xs,"This model is a PyTorch "),io=r(Xs,"A",{href:!0,rel:!0});var Ur=l(io);Un=n(Ur,"torch.nn.Module"),Ur.forEach(t),Kn=n(Xs,` sub-class. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),Xs.forEach(t),Yn=h(Ze),me=r(Ze,"DIV",{class:!0});var et=l(me);T(co.$$.fragment,et),Gn=h(et),ft=r(et,"P",{});var Jo=l(ft);Xn=n(Jo,"The "),Lo=r(Jo,"A",{href:!0});var Kr=l(Lo);Jn=n(Kr,"HubertForCTC"),Kr.forEach(t),Qn=n(Jo," forward method, overrides the "),ls=r(Jo,"CODE",{});var Yr=l(ls);Zn=n(Yr,"__call__"),Yr.forEach(t),ea=n(Jo," special method."),Jo.forEach(t),ta=h(et),T(Ht.$$.fragment,et),oa=h(et),T(Et.$$.fragment,et),sa=h(et),T(jt.$$.fragment,et),et.forEach(t),Ze.forEach(t),Ss=h(o),gt=r(o,"H2",{class:!0});var Js=l(gt);Ft=r(Js,"A",{id:!0,class:!0,href:!0});var Gr=l(Ft);is=r(Gr,"SPAN",{});var Xr=l(is);T(po.$$.fragment,Xr),Xr.forEach(t),Gr.forEach(t),na=h(Js),ds=r(Js,"SPAN",{});var Jr=l(ds);aa=n(Jr,"HubertForSequenceClassification"),Jr.forEach(t),Js.forEach(t),Os=h(o),Y=r(o,"DIV",{class:!0});var Pe=l(Y);T(uo.$$.fragment,Pe),ra=h(Pe),cs=r(Pe,"P",{});var Qr=l(cs);la=n(Qr,`Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.`),Qr.forEach(t),ia=h(Pe),ho=r(Pe,"P",{});var Qs=l(ho);da=n(Qs,"Hubert was proposed in "),mo=r(Qs,"A",{href:!0,rel:!0});var Zr=l(mo);ca=n(Zr,`HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units`),Zr.forEach(t),pa=n(Qs,` by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.`),Qs.forEach(t),ua=h(Pe),fo=r(Pe,"P",{});var Zs=l(fo);ha=n(Zs,"This model inherits from "),Wo=r(Zs,"A",{href:!0});var el=l(Wo);ma=n(el,"PreTrainedModel"),el.forEach(t),fa=n(Zs,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.).`),Zs.forEach(t),ga=h(Pe),go=r(Pe,"P",{});var en=l(go);_a=n(en,"This model is a PyTorch "),_o=r(en,"A",{href:!0,rel:!0});var tl=l(_o);ba=n(tl,"torch.nn.Module"),tl.forEach(t),va=n(en,` sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.`),en.forEach(t),ya=h(Pe),fe=r(Pe,"DIV",{class:!0});var tt=l(fe);T(bo.$$.fragment,tt),wa=h(tt),_t=r(tt,"P",{});var Qo=l(_t);Ta=n(Qo,"The "),No=r(Qo,"A",{href:!0});var ol=l(No);ka=n(ol,"HubertForSequenceClassification"),ol.forEach(t),$a=n(Qo," forward method, overrides the "),ps=r(Qo,"CODE",{});var sl=l(ps);Ca=n(sl,"__call__"),sl.forEach(t),Ha=n(Qo," special method."),Qo.forEach(t),Ea=h(tt),T(xt.$$.fragment,tt),ja=h(tt),T(qt.$$.fragment,tt),Fa=h(tt),T(Mt.$$.fragment,tt),tt.forEach(t),Pe.forEach(t),Ds=h(o),bt=r(o,"H2",{class:!0});var tn=l(bt);Pt=r(tn,"A",{id:!0,class:!0,href:!0});var nl=l(Pt);us=r(nl,"SPAN",{});var al=l(us);T(vo.$$.fragment,al),al.forEach(t),nl.forEach(t),xa=h(tn),hs=r(tn,"SPAN",{});var rl=l(hs);qa=n(rl,"TFHubertModel"),rl.forEach(t),tn.forEach(t),zs=h(o),G=r(o,"DIV",{class:!0});var Se=l(G);T(yo.$$.fragment,Se),Ma=h(Se),ms=r(Se,"P",{});var ll=l(ms);Pa=n(ll,"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top."),ll.forEach(t),Sa=h(Se),wo=r(Se,"P",{});var on=l(wo);Oa=n(on,"This model inherits from "),Bo=r(on,"A",{href:!0});var il=l(Bo);Da=n(il,"TFPreTrainedModel"),il.forEach(t),za=n(on,`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),on.forEach(t),Aa=h(Se),To=r(Se,"P",{});var sn=l(To);Ia=n(sn,"This model is also a "),ko=r(sn,"A",{href:!0,rel:!0});var dl=l(ko);La=n(dl,"tf.keras.Model"),dl.forEach(t),Wa=n(sn,` subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),sn.forEach(t),Na=h(Se),T(St.$$.fragment,Se),Ba=h(Se),qe=r(Se,"DIV",{class:!0});var Bt=l(qe);T($o.$$.fragment,Bt),Ra=h(Bt),vt=r(Bt,"P",{});var Zo=l(vt);Va=n(Zo,"The "),Ro=r(Zo,"A",{href:!0});var cl=l(Ro);Ua=n(cl,"TFHubertModel"),cl.forEach(t),Ka=n(Zo," forward method, overrides the "),fs=r(Zo,"CODE",{});var pl=l(fs);Ya=n(pl,"__call__"),pl.forEach(t),Ga=n(Zo," special method."),Zo.forEach(t),Xa=h(Bt),T(Ot.$$.fragment,Bt),Ja=h(Bt),T(Dt.$$.fragment,Bt),Bt.forEach(t),Se.forEach(t),As=h(o),yt=r(o,"H2",{class:!0});var nn=l(yt);zt=r(nn,"A",{id:!0,class:!0,href:!0});var ul=l(zt);gs=r(ul,"SPAN",{});var hl=l(gs);T(Co.$$.fragment,hl),hl.forEach(t),ul.forEach(t),Qa=h(nn),_s=r(nn,"SPAN",{});var ml=l(_s);Za=n(ml,"TFHubertForCTC"),ml.forEach(t),nn.forEach(t),Is=h(o),X=r(o,"DIV",{class:!0});var Oe=l(X);T(Ho.$$.fragment,Oe),er=h(Oe),Eo=r(Oe,"P",{});var an=l(Eo);tr=n(an,"TFHubert Model with a "),bs=r(an,"CODE",{});var fl=l(bs);or=n(fl,"language modeling"),fl.forEach(t),sr=n(an," head on top for Connectionist Temporal Classification (CTC)."),an.forEach(t),nr=h(Oe),jo=r(Oe,"P",{});var rn=l(jo);ar=n(rn,"This model inherits from "),Vo=r(rn,"A",{href:!0});var gl=l(Vo);rr=n(gl,"TFPreTrainedModel"),gl.forEach(t),lr=n(rn,`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)`),rn.forEach(t),ir=h(Oe),Fo=r(Oe,"P",{});var ln=l(Fo);dr=n(ln,"This model is also a "),xo=r(ln,"A",{href:!0,rel:!0});var _l=l(xo);cr=n(_l,"tf.keras.Model"),_l.forEach(t),pr=n(ln,` subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior.`),ln.forEach(t),ur=h(Oe),T(At.$$.fragment,Oe),hr=h(Oe),Me=r(Oe,"DIV",{class:!0});var Rt=l(Me);T(qo.$$.fragment,Rt),mr=h(Rt),wt=r(Rt,"P",{});var es=l(wt);fr=n(es,"The "),Uo=r(es,"A",{href:!0});var bl=l(Uo);gr=n(bl,"TFHubertForCTC"),bl.forEach(t),_r=n(es," forward method, overrides the "),vs=r(es,"CODE",{});var vl=l(vs);br=n(vl,"__call__"),vl.forEach(t),vr=n(es," special method."),es.forEach(t),yr=h(Rt),T(It.$$.fragment,Rt),wr=h(Rt),T(Lt.$$.fragment,Rt),Rt.forEach(t),Oe.forEach(t),this.h()},h(){f(d,"name","hf:doc:metadata"),f(d,"content",JSON.stringify(Wl)),f(m,"id","hubert"),f(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(m,"href","#hubert"),f(c,"class","relative group"),f(J,"id","overview"),f(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(J,"href","#overview"),f(D,"class","relative group"),f(te,"href","https://arxiv.org/abs/2106.07447"),f(te,"rel","nofollow"),f(S,"href","/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer"),f(le,"href","https://huggingface.co/patrickvonplaten"),f(le,"rel","nofollow"),f(Z,"id","transformers.HubertConfig"),f(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Z,"href","#transformers.HubertConfig"),f(A,"class","relative group"),f(Ee,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertModel"),f(be,"href","https://huggingface.co/facebook/hubert-base-ls960"),f(be,"rel","nofollow"),f(je,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),f(Fe,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),f(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Tt,"id","transformers.HubertModel"),f(Tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Tt,"href","#transformers.HubertModel"),f(pt,"class","relative group"),f(Qt,"href","https://arxiv.org/abs/2106.07447"),f(Qt,"rel","nofollow"),f(zo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),f(to,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),f(to,"rel","nofollow"),f(Ao,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertModel"),f(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ct,"id","transformers.HubertForCTC"),f(Ct,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ct,"href","#transformers.HubertForCTC"),f(ht,"class","relative 
group"),f(ao,"href","https://arxiv.org/abs/2106.07447"),f(ao,"rel","nofollow"),f(Io,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),f(io,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),f(io,"rel","nofollow"),f(Lo,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertForCTC"),f(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ft,"id","transformers.HubertForSequenceClassification"),f(Ft,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ft,"href","#transformers.HubertForSequenceClassification"),f(gt,"class","relative group"),f(mo,"href","https://arxiv.org/abs/2106.07447"),f(mo,"rel","nofollow"),f(Wo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),f(_o,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),f(_o,"rel","nofollow"),f(No,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.HubertForSequenceClassification"),f(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Pt,"id","transformers.TFHubertModel"),f(Pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Pt,"href","#transformers.TFHubertModel"),f(bt,"class","relative group"),f(Bo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),f(ko,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),f(ko,"rel","nofollow"),f(Ro,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.TFHubertModel"),f(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(zt,"id","transformers.TFHubertForCTC"),f(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(zt,"href","#transformers.TFHubertForCTC"),f(yt,"class","relative group"),f(Vo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),f(xo,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),f(xo,"rel","nofollow"),f(Uo,"href","/docs/transformers/pr_19429/en/model_doc/hubert#transformers.TFHubertForCTC"),f(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(o,_){e(document.head,d),g(o,v,_),g(o,c,_),e(c,m),e(m,y),k(i,y,null),e(c,p),e(c,x),e(x,De),g(o,ve,_),g(o,D,_),e(D,J),e(J,ee),k(E,ee,null),e(D,ze),e(D,U),e(U,Ae),g(o,ye,_),g(o,L,_),e(L,Ie),e(L,te),e(te,oe),e(L,Le),g(o,we,_),g(o,B,_),e(B,We),g(o,Te,_),g(o,R,_),e(R,ge),e(ge,Ne),g(o,he,_),g(o,M,_),e(M,z),g(o,ke,_),g(o,W,_),e(W,_e),e(_e,Be),e(W,se),e(W,ne),e(ne,Re),e(ne,S),e(S,Ve),e(ne,ae),g(o,$e,_),g(o,Q,_),e(Q,re),e(Q,le),e(le,Ue),e(Q,K),g(o,Ce,_),g(o,A,_),e(A,Z),e(Z,ie),k(P,ie,null),e(A,Ke),e(A,I),e(I,Ye),g(o,He,_),g(o,b,_),k(j,b,null),e(b,ot),e(b,N),e(N,st),e(N,Ee),e(Ee,q),e(N,nt),e(N,be),e(be,at),e(N,rt),e(b,lt),e(b,O),e(O,de),e(O,je),e(je,it),e(O,ce),e(O,Fe),e(Fe,dt),e(O,Ge),e(b,ct),k(V,b,null),g(o,Je,_),g(o,pt,_),e(pt,Tt),e(Tt,ts),k(Gt,ts,null),e(pt,dn),e(pt,os),e(os,cn),g(o,qs,_),g(o,pe,_),k(Xt,pe,null),e(pe,pn),e(pe,Jt),e(Jt,un),e(Jt,Qt),e(Qt,hn),e(Jt,mn),e(pe,fn),e(pe,Zt),e(Zt,gn),e(Zt,zo),e(zo,_n),e(Zt,bn),e(pe,vn),e(pe,eo),e(eo,yn),e(eo,to),e(to,wn),e(eo,Tn),e(pe,kn),e(pe,xe),k(oo,xe,null),e(xe,$n),e(xe,ut),e(ut,Cn),e(ut,Ao),e(Ao,Hn),e(ut,En),e(ut,ss),e(ss,jn),e(ut,Fn),e(xe,xn),k(kt,xe,null),e(xe,qn),k($t,xe,null),g(o,Ms,_),g(o,ht,_),e(ht,Ct),e(Ct,ns),k(so,ns,null),e(ht,Mn),e(ht,as),e(as,Pn),g(o,Ps,_),g(o,ue,_),k(no,ue,null),e(ue,Sn),e(ue,mt),e(mt,On),e(mt,rs),e(rs,Dn),e(mt,zn),e(mt,ao),e(ao,An),e(mt,In),e(ue,Ln),e(ue,ro),e(ro,Wn),e(ro,Io),e(Io,Nn),e(ro,Bn),e(ue,Rn),e(ue,lo),e(lo,Vn),e(lo,io),e(io,Un),e(lo,Kn),e(ue,Yn),e(ue,me),k(co,me,null),e(me,Gn),e(me,ft),e(ft,Xn),e(ft,Lo),e(Lo,Jn),e(ft,Qn),e(ft,ls),e(ls,Zn),e(ft,ea),e(me,ta),k(Ht,me,null),e(me,oa),k(Et,me,null),e(me,sa),k(jt,me,null),g(o,Ss,_),g(o,gt,_),e(gt,Ft),e(Ft,is),k(po,is,null),e(gt,na),e(gt,ds),e(ds,aa),g(o,Os,_),g(o,Y,_),k(uo,Y,null),e(Y,ra),e(Y,cs),e(cs,la),e(Y,ia),e(Y,ho),e(ho,da),e(ho,mo),e(mo,ca),e(ho,pa),e(Y,ua),e(Y,fo),e(fo,ha),e(fo,Wo),e(Wo,ma),e(fo,fa),e(Y,ga),e(Y,go),e(go,_a),e(go,_o),e(_o,ba),e(go,va),e(Y,ya),e(Y,fe),k(bo,fe,null),e(fe,wa),e(fe,_t),e(_t,Ta),e(_t,No),e(No,ka),e(_t,$a),e(_t,ps),e(ps,Ca),e(_t,Ha),e(fe,Ea),k(xt,fe,null),e(fe,ja),k(qt,fe,null),e(fe,Fa),k(Mt,fe,null),g(o,Ds,_),g(o,bt,_),e(bt,Pt),e(Pt,us),k(vo,us,null),e(bt,xa),e(bt,hs),e(hs,qa),g(o,zs,_),g(o,G,_),k(yo,G,null),e(G,Ma),e(G,ms),e(ms,Pa),e(G,Sa),e(G,wo),e(wo,Oa),e(wo,Bo),e(Bo,Da),e(wo,za),e(G,Aa),e(G,To),e(To,Ia),e(To,ko),e(ko,La),e(To,Wa),e(G,Na),k(St,G,null),e(G,Ba),e(G,qe),k($o,qe,null),e(qe,Ra),e(qe,vt),e(vt,Va),e(vt,Ro),e(Ro,Ua),e(vt,Ka),e(vt,fs),e(fs,Ya),e(vt,Ga),e(qe,Xa),k(Ot,qe,null),e(qe,Ja),k(Dt,qe,null),g(o,As,_),g(o,yt,_),e(yt,zt),e(zt,gs),k(Co,gs,null),e(yt,Qa),e(yt,_s),e(_s,Za),g(o,Is,_),g(o,X,_),k(Ho,X,null),e(X,er),e(X,Eo),e(Eo,tr),e(Eo,bs),e(bs,or),e(Eo,sr),e(X,nr),e(X,jo),e(jo,ar),e(jo,Vo),e(Vo,rr),e(jo,lr),e(X,ir),e(X,Fo),e(Fo,dr),e(Fo,xo),e(xo,cr),e(Fo,pr),e(X,ur),k(At,X,null),e(X,hr),e(X,Me),k(qo,Me,null),e(Me,mr),e(Me,wt),e(wt,fr),e(wt,Uo),e(Uo,gr),e(wt,_r),e(wt,vs),e(vs,br),e(wt,vr),e(Me,yr),k(It,Me,null),e(Me,wr),k(Lt,Me,null),Ls=!0},p(o,[_]){const Mo={};_&2&&(Mo.$$scope={dirty:_,ctx:o}),V.$set(Mo);const ys={};_&2&&(ys.$$scope={dirty:_,ctx:o}),kt.$set(ys);const ws={};_&2&&(ws.$$scope={dirty:_,ctx:o}),$t.$set(ws);const Ts={};_&2&&(Ts.$$scope={dirty:_,ctx:o}),Ht.$set(Ts);const Po={};_&2&&(Po.$$scope={dirty:_,ctx:o}),Et.$set(Po);const ks={};_&2&&(ks.$$scope={dirty:_,ctx:o}),jt.$set(ks);const $s={};_&2&&($s.$$scope={dirty:_,ctx:o}),xt.$set($s);const Cs={};_&2&&(Cs.$$scope={dirty:_,ctx:o}),qt.$set(Cs);const So={};_&2&&(So.$$scope={dirty:_,ctx:o}),Mt.$set(So);const 
Hs={};_&2&&(Hs.$$scope={dirty:_,ctx:o}),St.$set(Hs);const Es={};_&2&&(Es.$$scope={dirty:_,ctx:o}),Ot.$set(Es);const js={};_&2&&(js.$$scope={dirty:_,ctx:o}),Dt.$set(js);const Fs={};_&2&&(Fs.$$scope={dirty:_,ctx:o}),At.$set(Fs);const xs={};_&2&&(xs.$$scope={dirty:_,ctx:o}),It.$set(xs);const Oo={};_&2&&(Oo.$$scope={dirty:_,ctx:o}),Lt.$set(Oo)},i(o){Ls||($(i.$$.fragment,o),$(E.$$.fragment,o),$(P.$$.fragment,o),$(j.$$.fragment,o),$(V.$$.fragment,o),$(Gt.$$.fragment,o),$(Xt.$$.fragment,o),$(oo.$$.fragment,o),$(kt.$$.fragment,o),$($t.$$.fragment,o),$(so.$$.fragment,o),$(no.$$.fragment,o),$(co.$$.fragment,o),$(Ht.$$.fragment,o),$(Et.$$.fragment,o),$(jt.$$.fragment,o),$(po.$$.fragment,o),$(uo.$$.fragment,o),$(bo.$$.fragment,o),$(xt.$$.fragment,o),$(qt.$$.fragment,o),$(Mt.$$.fragment,o),$(vo.$$.fragment,o),$(yo.$$.fragment,o),$(St.$$.fragment,o),$($o.$$.fragment,o),$(Ot.$$.fragment,o),$(Dt.$$.fragment,o),$(Co.$$.fragment,o),$(Ho.$$.fragment,o),$(At.$$.fragment,o),$(qo.$$.fragment,o),$(It.$$.fragment,o),$(Lt.$$.fragment,o),Ls=!0)},o(o){C(i.$$.fragment,o),C(E.$$.fragment,o),C(P.$$.fragment,o),C(j.$$.fragment,o),C(V.$$.fragment,o),C(Gt.$$.fragment,o),C(Xt.$$.fragment,o),C(oo.$$.fragment,o),C(kt.$$.fragment,o),C($t.$$.fragment,o),C(so.$$.fragment,o),C(no.$$.fragment,o),C(co.$$.fragment,o),C(Ht.$$.fragment,o),C(Et.$$.fragment,o),C(jt.$$.fragment,o),C(po.$$.fragment,o),C(uo.$$.fragment,o),C(bo.$$.fragment,o),C(xt.$$.fragment,o),C(qt.$$.fragment,o),C(Mt.$$.fragment,o),C(vo.$$.fragment,o),C(yo.$$.fragment,o),C(St.$$.fragment,o),C($o.$$.fragment,o),C(Ot.$$.fragment,o),C(Dt.$$.fragment,o),C(Co.$$.fragment,o),C(Ho.$$.fragment,o),C(At.$$.fragment,o),C(qo.$$.fragment,o),C(It.$$.fragment,o),C(Lt.$$.fragment,o),Ls=!1},d(o){t(d),o&&t(v),o&&t(c),H(i),o&&t(ve),o&&t(D),H(E),o&&t(ye),o&&t(L),o&&t(we),o&&t(B),o&&t(Te),o&&t(R),o&&t(he),o&&t(M),o&&t(ke),o&&t(W),o&&t($e),o&&t(Q),o&&t(Ce),o&&t(A),H(P),o&&t(He),o&&t(b),H(j),H(V),o&&t(Je),o&&t(pt),H(Gt),o&&t(qs),o&&t(pe),H(Xt),H(oo),H(kt),H($t),o&&t(Ms),o&&t(ht),H(so),o&&t(Ps),o&&t(ue),H(no),H(co),H(Ht),H(Et),H(jt),o&&t(Ss),o&&t(gt),H(po),o&&t(Os),o&&t(Y),H(uo),H(bo),H(xt),H(qt),H(Mt),o&&t(Ds),o&&t(bt),H(vo),o&&t(zs),o&&t(G),H(yo),H(St),H($o),H(Ot),H(Dt),o&&t(As),o&&t(yt),H(Co),o&&t(Is),o&&t(X),H(Ho),H(At),H(qo),H(It),H(Lt)}}}const Wl={local:"hubert",sections:[{local:"overview",title:"Overview"},{local:"transformers.HubertConfig",title:"HubertConfig"},{local:"transformers.HubertModel",title:"HubertModel"},{local:"transformers.HubertForCTC",title:"HubertForCTC"},{local:"transformers.HubertForSequenceClassification",title:"HubertForSequenceClassification"},{local:"transformers.TFHubertModel",title:"TFHubertModel"},{local:"transformers.TFHubertForCTC",title:"TFHubertForCTC"}],title:"Hubert"};function Nl(F){return $l(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Gl extends yl{constructor(d){super();wl(this,d,Nl,Ll,Tl,{})}}export{Gl as default,Wl as metadata};
8
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/model_doc/tapex.mdx-hf-doc-builder.js
import{S as Fo,i as Co,s as No,e as o,k as d,w as v,t as r,M as Io,c as n,d as a,m as p,a as s,x as k,h as i,b as c,G as t,g as h,y as T,L as Xo,q as w,o as y,B as x,v as Bo}from"../../chunks/vendor-hf-doc-builder.js";import{D as Wa}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Ra}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as _t}from"../../chunks/IconCopyLink-hf-doc-builder.js";function Do(Ma){let q,Me,A,S,ye,W,bt,xe,vt,Ue,z,L,qe,R,kt,Ae,Tt,Ge,F,wt,M,yt,xt,Oe,ne,qt,He,f,se,U,At,zt,Et,re,G,jt,Pt,$t,ie,O,St,Lt,Ft,le,H,Ct,Nt,Ve,ce,It,Je,de,ze,Xt,Ze,pe,Bt,Ke,g,Ee,Dt,Qt,je,Wt,Rt,E,Mt,Pe,Ut,Gt,$e,Ot,Ht,Vt,j,Jt,Se,Zt,Kt,Le,Yt,ea,Ye,P,C,Fe,V,ta,Ce,aa,et,_,oa,he,na,sa,ue,ra,ia,me,la,ca,tt,J,at,N,da,fe,pa,ha,ot,Z,nt,b,ua,ge,ma,fa,_e,ga,_a,st,K,rt,$,I,Ne,Y,ba,Ie,va,it,u,ee,ka,Xe,Ta,wa,Be,ya,xa,De,qa,Aa,Qe,za,Ea,We,ja,Pa,te,$a,be,Sa,La,Fa,X,ae,Ca,Re,Na,Ia,ve,oe,lt;return W=new _t({}),R=new _t({}),V=new _t({}),J=new Ra({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM import pandas as pd tokenizer = AutoTokenizer.from_pretrained("microsoft/tapex-large-finetuned-wtq") model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/tapex-large-finetuned-wtq") # prepare table + question data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]} table = pd.DataFrame.from_dict(data) question = "how many movies does Leonardo Di Caprio have?" encoding = tokenizer(table, question, return_tensors="pt") # let the model generate an answer autoregressively outputs = model.generate(**encoding) # decode back to text predicted_answer = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0] print(predicted_answer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/tapex-large-finetuned-wtq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;microsoft/tapex-large-finetuned-wtq&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># prepare table + question</span> <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&quot;Actors&quot;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&quot;Number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;how many movies does Leonardo Di Caprio have?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(table, question, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># let the model generate an answer autoregressively</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = 
model.generate(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># decode back to text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_answer = tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>)[<span class="hljs-number">0</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(predicted_answer) <span class="hljs-number">53</span>`}}),Z=new Ra({props:{code:`# prepare table + question data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]} table = pd.DataFrame.from_dict(data) questions = [ "how many movies does Leonardo Di Caprio have?", "which actor has 69 movies?", "what's the first name of the actor who has 87 movies?", ] encoding = tokenizer(table, questions, padding=True, return_tensors="pt") # let the model generate an answer autoregressively outputs = model.generate(**encoding) # decode back to text tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># prepare table + question</span> <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&quot;Actors&quot;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&quot;Number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>questions = [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;how many movies does Leonardo Di Caprio have?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;which actor has 69 movies?&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;what&#x27;s the first name of the actor who has 87 movies?&quot;</span>, <span class="hljs-meta">... 
</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(table, questions, padding=<span class="hljs-literal">True</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># let the model generate an answer autoregressively</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># decode back to text</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27; 53&#x27;</span>, <span class="hljs-string">&#x27; george clooney&#x27;</span>, <span class="hljs-string">&#x27; brad pitt&#x27;</span>]`}}),K=new Ra({props:{code:`from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("microsoft/tapex-large-finetuned-tabfact") model = AutoModelForSequenceClassification.from_pretrained("microsoft/tapex-large-finetuned-tabfact") # prepare table + sentence data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]} table = pd.DataFrame.from_dict(data) sentence = "George Clooney has 30 movies" encoding = tokenizer(table, sentence, return_tensors="pt") # forward pass outputs = model(**encoding) # print prediction predicted_class_idx = outputs.logits[0].argmax(dim=0).item() print(model.config.id2label[predicted_class_idx])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;microsoft/tapex-large-finetuned-tabfact&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;microsoft/tapex-large-finetuned-tabfact&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># prepare table + sentence</span> <span class="hljs-meta">&gt;&gt;&gt; </span>data = {<span class="hljs-string">&quot;Actors&quot;</span>: [<span class="hljs-string">&quot;Brad Pitt&quot;</span>, <span class="hljs-string">&quot;Leonardo Di Caprio&quot;</span>, <span class="hljs-string">&quot;George Clooney&quot;</span>], <span class="hljs-string">&quot;Number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>]} <span class="hljs-meta">&gt;&gt;&gt; </span>table = pd.DataFrame.from_dict(data) <span class="hljs-meta">&gt;&gt;&gt; </span>sentence = <span class="hljs-string">&quot;George Clooney has 30 movies&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(table, sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># forward pass</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(**encoding) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># print prediction</span> <span class="hljs-meta">&gt;&gt;&gt; </span>predicted_class_idx = outputs.logits[<span class="hljs-number">0</span>].argmax(dim=<span class="hljs-number">0</span>).item() <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-built_in">print</span>(model.config.id2label[predicted_class_idx]) Refused`}}),Y=new _t({}),ee=new Wa({props:{name:"class transformers.TapexTokenizer",anchor:"transformers.TapexTokenizer",parameters:[{name:"vocab_file",val:""},{name:"merges_file",val:""},{name:"do_lower_case",val:" = True"},{name:"errors",val:" = 'replace'"},{name:"bos_token",val:" = '<s>'"},{name:"eos_token",val:" = '</s>'"},{name:"sep_token",val:" = '</s>'"},{name:"cls_token",val:" = '<s>'"},{name:"unk_token",val:" = '<unk>'"},{name:"pad_token",val:" = '<pad>'"},{name:"mask_token",val:" = '<mask>'"},{name:"add_prefix_space",val:" = False"},{name:"max_cell_length",val:" = 15"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TapexTokenizer.vocab_file",description:`<strong>vocab_file</strong> (<code>str</code>) &#x2014; Path to the vocabulary file.`,name:"vocab_file"},{anchor:"transformers.TapexTokenizer.merges_file",description:`<strong>merges_file</strong> (<code>str</code>) &#x2014; Path to the merges file.`,name:"merges_file"},{anchor:"transformers.TapexTokenizer.do_lower_case",description:`<strong>do_lower_case</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to lowercase the input when tokenizing.`,name:"do_lower_case"},{anchor:"transformers.TapexTokenizer.errors",description:`<strong>errors</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;replace&quot;</code>) &#x2014; Paradigm to follow when decoding bytes to UTF-8. See <a href="https://docs.python.org/3/library/stdtypes.html#bytes.decode" rel="nofollow">bytes.decode</a> for more information.`,name:"errors"},{anchor:"transformers.TapexTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the <code>cls_token</code>.</p> </div>`,name:"bos_token"},{anchor:"transformers.TapexTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The end of sequence token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the <code>sep_token</code>.</p> </div>`,name:"eos_token"},{anchor:"transformers.TapexTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;/s&gt;&quot;</code>) &#x2014; The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. 
It is also used as the last token of a sequence built with special tokens.`,name:"sep_token"},{anchor:"transformers.TapexTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;s&gt;&quot;</code>) &#x2014; The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.`,name:"cls_token"},{anchor:"transformers.TapexTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;unk&gt;&quot;</code>) &#x2014; The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.`,name:"unk_token"},{anchor:"transformers.TapexTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;pad&gt;&quot;</code>) &#x2014; The token used for padding, for example when batching sequences of different lengths.`,name:"pad_token"},{anchor:"transformers.TapexTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&lt;mask&gt;&quot;</code>) &#x2014; The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.`,name:"mask_token"},{anchor:"transformers.TapexTokenizer.add_prefix_space",description:`<strong>add_prefix_space</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (BART tokenizer detect beginning of words by the preceding space).`,name:"add_prefix_space"},{anchor:"transformers.TapexTokenizer.max_cell_length",description:`<strong>max_cell_length</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; Maximum number of characters per cell when linearizing a table. 
If this number is exceeded, truncation takes place.`,name:"max_cell_length"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/tapex/tokenization_tapex.py#L194"}}),ae=new Wa({props:{name:"__call__",anchor:"transformers.TapexTokenizer.__call__",parameters:[{name:"table",val:": typing.Union[ForwardRef('pd.DataFrame'), typing.List[ForwardRef('pd.DataFrame')]] = None"},{name:"query",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"answer",val:": typing.Union[str, typing.List[str]] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TapexTokenizer.__call__.table",description:`<strong>table</strong> (<code>pd.DataFrame</code>, <code>List[pd.DataFrame]</code>) &#x2014; Table(s) containing tabular data.`,name:"table"},{anchor:"transformers.TapexTokenizer.__call__.query",description:`<strong>query</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of sentences must match the number of tables.`,name:"query"},{anchor:"transformers.TapexTokenizer.__call__.answer",description:`<strong>answer</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Optionally, the corresponding answer to the questions as supervision.`,name:"answer"},{anchor:"transformers.TapexTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.TapexTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.TapexTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.TapexTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.TapexTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.TapexTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.TapexTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.TapexTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.TapexTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.TapexTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.TapexTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code>, <code>TapexTruncationStrategy</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, &#x2014; <em>optional</em>, defaults to <code>False</code>):</p> <p>Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate row by row, removing rows from the table.</li> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.TapexTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.TapexTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.TapexTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.TapexTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/tapex/tokenization_tapex.py#L513"}}),oe=new Wa({props:{name:"save_vocabulary",anchor:"transformers.TapexTokenizer.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/models/tapex/tokenization_tapex.py#L484"}}),{c(){q=o("meta"),Me=d(),A=o("h1"),S=o("a"),ye=o("span"),v(W.$$.fragment),bt=d(),xe=o("span"),vt=r("TAPEX"),Ue=d(),z=o("h2"),L=o("a"),qe=o("span"),v(R.$$.fragment),kt=d(),Ae=o("span"),Tt=r("Overview"),Ge=d(),F=o("p"),wt=r("The TAPEX model was proposed in "),M=o("a"),yt=r("TAPEX: Table Pre-training via Learning a Neural SQL Executor"),xt=r(` by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. TAPEX pre-trains a BART model to solve synthetic SQL queries, after which it can be fine-tuned to answer natural language questions related to tabular data, as well as performing table fact checking.`),Oe=d(),ne=o("p"),qt=r("TAPEX has been fine-tuned on several datasets:"),He=d(),f=o("ul"),se=o("li"),U=o("a"),At=r("SQA"),zt=r(" (Sequential Question Answering by Microsoft)"),Et=d(),re=o("li"),G=o("a"),jt=r("WTQ"),Pt=r(" (Wiki Table Questions by Stanford University)"),$t=d(),ie=o("li"),O=o("a"),St=r("WikiSQL"),Lt=r(" (by Salesforce)"),Ft=d(),le=o("li"),H=o("a"),Ct=r("TabFact"),Nt=r(" (by USCB NLP Lab)."),Ve=d(),ce=o("p"),It=r("The abstract from the paper is the following:"),Je=d(),de=o("p"),ze=o("em"),Xt=r(`Recent progress in language model pre-training has achieved a great success via leveraging large-scale unstructured textual data. However, it is still a challenge to apply pre-training on structured tabular data due to the absence of large-scale high-quality tabular data. In this paper, we propose TAPEX to show that table pre-training can be achieved by learning a neural SQL executor over a synthetic corpus, which is obtained by automatically synthesizing executable SQL queries and their execution outputs. TAPEX addresses the data scarcity challenge via guiding the language model to mimic a SQL executor on the diverse, large-scale and high-quality synthetic corpus. We evaluate TAPEX on four benchmark datasets. Experimental results demonstrate that TAPEX outperforms previous table pre-training approaches by a large margin and achieves new state-of-the-art results on all of them. This includes improvements on the weakly-supervised WikiSQL denotation accuracy to 89.5% (+2.3%), the WikiTableQuestions denotation accuracy to 57.5% (+4.8%), the SQA denotation accuracy to 74.5% (+3.5%), and the TabFact accuracy to 84.2% (+3.2%). To our knowledge, this is the first work to exploit table pre-training via synthetic executable programs and to achieve new state-of-the-art results on various downstream tasks.`),Ze=d(),pe=o("p"),Bt=r("Tips:"),Ke=d(),g=o("ul"),Ee=o("li"),Dt=r("TAPEX is a generative (seq2seq) model. 
One can directly plug in the weights of TAPEX into a BART model."),Qt=d(),je=o("li"),Wt=r("TAPEX has checkpoints on the hub that are either pre-trained only, or fine-tuned on WTQ, SQA, WikiSQL and TabFact."),Rt=d(),E=o("li"),Mt=r("Sentences + tables are presented to the model as "),Pe=o("code"),Ut=r('sentence + " " + linearized table'),Gt=r(`. The linearized table has the following format: `),$e=o("code"),Ot=r("col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ..."),Ht=r("."),Vt=d(),j=o("li"),Jt=r(`TAPEX has its own tokenizer, that allows to prepare all data for the model easily. One can pass Pandas DataFrames and strings to the tokenizer, and it will automatically create the `),Se=o("code"),Zt=r("input_ids"),Kt=r(" and "),Le=o("code"),Yt=r("attention_mask"),ea=r(" (as shown in the usage examples below)."),Ye=d(),P=o("h2"),C=o("a"),Fe=o("span"),v(V.$$.fragment),ta=d(),Ce=o("span"),aa=r("Usage: inference"),et=d(),_=o("p"),oa=r(`Below, we illustrate how to use TAPEX for table question answering. As one can see, one can directly plug in the weights of TAPEX into a BART model. We use the `),he=o("a"),na=r("Auto API"),sa=r(", which will automatically instantiate the appropriate tokenizer ("),ue=o("a"),ra=r("TapexTokenizer"),ia=r(") and model ("),me=o("a"),la=r("BartForConditionalGeneration"),ca=r(`) for us, based on the configuration file of the checkpoint on the hub.`),tt=d(),v(J.$$.fragment),at=d(),N=o("p"),da=r("Note that "),fe=o("a"),pa=r("TapexTokenizer"),ha=r(` also supports batched inference. Hence, one can provide a batch of different tables/questions, or a batch of a single table and multiple questions, or a batch of a single query and multiple tables. Let\u2019s illustrate this:`),ot=d(),v(Z.$$.fragment),nt=d(),b=o("p"),ua=r(`In case one wants to do table verification (i.e. the task of determining whether a given sentence is supported or refuted by the contents of a table), one can instantiate a `),ge=o("a"),ma=r("BartForSequenceClassification"),fa=r(` model. TAPEX has checkpoints on the hub fine-tuned on TabFact, an important benchmark for table fact checking (it achieves 84% accuracy). The code example below again leverages the `),_e=o("a"),ga=r("Auto API"),_a=r("."),st=d(),v(K.$$.fragment),rt=d(),$=o("h2"),I=o("a"),Ne=o("span"),v(Y.$$.fragment),ba=d(),Ie=o("span"),va=r("TapexTokenizer"),it=d(),u=o("div"),v(ee.$$.fragment),ka=d(),Xe=o("p"),Ta=r("Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE)."),wa=d(),Be=o("p"),ya=r(`This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:`),xa=d(),De=o("p"),qa=r("sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : \u2026"),Aa=d(),Qe=o("p"),za=r(`The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to the tokenizer for instance to prepare them for the model.`),Ea=d(),We=o("p"),ja=r("Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2."),Pa=d(),te=o("p"),$a=r("This tokenizer inherits from "),be=o("a"),Sa=r("PreTrainedTokenizer"),La=r(` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),Fa=d(),X=o("div"),v(ae.$$.fragment),Ca=d(),Re=o("p"),Na=r("Main method to tokenize and prepare for the model one or several table-sequence pair(s)."),Ia=d(),ve=o("div"),v(oe.$$.fragment),this.h()},l(e){const l=Io('[data-svelte="svelte-1phssyn"]',document.head);q=n(l,"META",{name:!0,content:!0}),l.forEach(a),Me=p(e),A=n(e,"H1",{class:!0});var ct=s(A);S=n(ct,"A",{id:!0,class:!0,href:!0});var Ua=s(S);ye=n(Ua,"SPAN",{});var Ga=s(ye);k(W.$$.fragment,Ga),Ga.forEach(a),Ua.forEach(a),bt=p(ct),xe=n(ct,"SPAN",{});var Oa=s(xe);vt=i(Oa,"TAPEX"),Oa.forEach(a),ct.forEach(a),Ue=p(e),z=n(e,"H2",{class:!0});var dt=s(z);L=n(dt,"A",{id:!0,class:!0,href:!0});var Ha=s(L);qe=n(Ha,"SPAN",{});var Va=s(qe);k(R.$$.fragment,Va),Va.forEach(a),Ha.forEach(a),kt=p(dt),Ae=n(dt,"SPAN",{});var Ja=s(Ae);Tt=i(Ja,"Overview"),Ja.forEach(a),dt.forEach(a),Ge=p(e),F=n(e,"P",{});var pt=s(F);wt=i(pt,"The TAPEX model was proposed in "),M=n(pt,"A",{href:!0,rel:!0});var Za=s(M);yt=i(Za,"TAPEX: Table Pre-training via Learning a Neural SQL Executor"),Za.forEach(a),xt=i(pt,` by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. TAPEX pre-trains a BART model to solve synthetic SQL queries, after which it can be fine-tuned to answer natural language questions related to tabular data, as well as performing table fact checking.`),pt.forEach(a),Oe=p(e),ne=n(e,"P",{});var Ka=s(ne);qt=i(Ka,"TAPEX has been fine-tuned on several datasets:"),Ka.forEach(a),He=p(e),f=n(e,"UL",{});var B=s(f);se=n(B,"LI",{});var Xa=s(se);U=n(Xa,"A",{href:!0,rel:!0});var Ya=s(U);At=i(Ya,"SQA"),Ya.forEach(a),zt=i(Xa," (Sequential Question Answering by Microsoft)"),Xa.forEach(a),Et=p(B),re=n(B,"LI",{});var Ba=s(re);G=n(Ba,"A",{href:!0,rel:!0});var eo=s(G);jt=i(eo,"WTQ"),eo.forEach(a),Pt=i(Ba," (Wiki Table Questions by Stanford University)"),Ba.forEach(a),$t=p(B),ie=n(B,"LI",{});var Da=s(ie);O=n(Da,"A",{href:!0,rel:!0});var to=s(O);St=i(to,"WikiSQL"),to.forEach(a),Lt=i(Da," (by Salesforce)"),Da.forEach(a),Ft=p(B),le=n(B,"LI",{});var Qa=s(le);H=n(Qa,"A",{href:!0,rel:!0});var ao=s(H);Ct=i(ao,"TabFact"),ao.forEach(a),Nt=i(Qa," (by USCB NLP Lab)."),Qa.forEach(a),B.forEach(a),Ve=p(e),ce=n(e,"P",{});var oo=s(ce);It=i(oo,"The abstract from the paper is the following:"),oo.forEach(a),Je=p(e),de=n(e,"P",{});var no=s(de);ze=n(no,"EM",{});var so=s(ze);Xt=i(so,`Recent progress in language model pre-training has achieved a great success via leveraging large-scale unstructured textual data. However, it is still a challenge to apply pre-training on structured tabular data due to the absence of large-scale high-quality tabular data. In this paper, we propose TAPEX to show that table pre-training can be achieved by learning a neural SQL executor over a synthetic corpus, which is obtained by automatically synthesizing executable SQL queries and their execution outputs. TAPEX addresses the data scarcity challenge via guiding the language model to mimic a SQL executor on the diverse, large-scale and high-quality synthetic corpus. We evaluate TAPEX on four benchmark datasets. Experimental results demonstrate that TAPEX outperforms previous table pre-training approaches by a large margin and achieves new state-of-the-art results on all of them. 
This includes improvements on the weakly-supervised WikiSQL denotation accuracy to 89.5% (+2.3%), the WikiTableQuestions denotation accuracy to 57.5% (+4.8%), the SQA denotation accuracy to 74.5% (+3.5%), and the TabFact accuracy to 84.2% (+3.2%). To our knowledge, this is the first work to exploit table pre-training via synthetic executable programs and to achieve new state-of-the-art results on various downstream tasks.`),so.forEach(a),no.forEach(a),Ze=p(e),pe=n(e,"P",{});var ro=s(pe);Bt=i(ro,"Tips:"),ro.forEach(a),Ke=p(e),g=n(e,"UL",{});var D=s(g);Ee=n(D,"LI",{});var io=s(Ee);Dt=i(io,"TAPEX is a generative (seq2seq) model. One can directly plug in the weights of TAPEX into a BART model."),io.forEach(a),Qt=p(D),je=n(D,"LI",{});var lo=s(je);Wt=i(lo,"TAPEX has checkpoints on the hub that are either pre-trained only, or fine-tuned on WTQ, SQA, WikiSQL and TabFact."),lo.forEach(a),Rt=p(D),E=n(D,"LI",{});var ke=s(E);Mt=i(ke,"Sentences + tables are presented to the model as "),Pe=n(ke,"CODE",{});var co=s(Pe);Ut=i(co,'sentence + " " + linearized table'),co.forEach(a),Gt=i(ke,`. The linearized table has the following format: `),$e=n(ke,"CODE",{});var po=s($e);Ot=i(po,"col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ..."),po.forEach(a),Ht=i(ke,"."),ke.forEach(a),Vt=p(D),j=n(D,"LI",{});var Te=s(j);Jt=i(Te,`TAPEX has its own tokenizer, that allows to prepare all data for the model easily. One can pass Pandas DataFrames and strings to the tokenizer, and it will automatically create the `),Se=n(Te,"CODE",{});var ho=s(Se);Zt=i(ho,"input_ids"),ho.forEach(a),Kt=i(Te," and "),Le=n(Te,"CODE",{});var uo=s(Le);Yt=i(uo,"attention_mask"),uo.forEach(a),ea=i(Te," (as shown in the usage examples below)."),Te.forEach(a),D.forEach(a),Ye=p(e),P=n(e,"H2",{class:!0});var ht=s(P);C=n(ht,"A",{id:!0,class:!0,href:!0});var mo=s(C);Fe=n(mo,"SPAN",{});var fo=s(Fe);k(V.$$.fragment,fo),fo.forEach(a),mo.forEach(a),ta=p(ht),Ce=n(ht,"SPAN",{});var go=s(Ce);aa=i(go,"Usage: inference"),go.forEach(a),ht.forEach(a),et=p(e),_=n(e,"P",{});var Q=s(_);oa=i(Q,`Below, we illustrate how to use TAPEX for table question answering. As one can see, one can directly plug in the weights of TAPEX into a BART model. We use the `),he=n(Q,"A",{href:!0});var _o=s(he);na=i(_o,"Auto API"),_o.forEach(a),sa=i(Q,", which will automatically instantiate the appropriate tokenizer ("),ue=n(Q,"A",{href:!0});var bo=s(ue);ra=i(bo,"TapexTokenizer"),bo.forEach(a),ia=i(Q,") and model ("),me=n(Q,"A",{href:!0});var vo=s(me);la=i(vo,"BartForConditionalGeneration"),vo.forEach(a),ca=i(Q,`) for us, based on the configuration file of the checkpoint on the hub.`),Q.forEach(a),tt=p(e),k(J.$$.fragment,e),at=p(e),N=n(e,"P",{});var ut=s(N);da=i(ut,"Note that "),fe=n(ut,"A",{href:!0});var ko=s(fe);pa=i(ko,"TapexTokenizer"),ko.forEach(a),ha=i(ut,` also supports batched inference. Hence, one can provide a batch of different tables/questions, or a batch of a single table and multiple questions, or a batch of a single query and multiple tables. Let\u2019s illustrate this:`),ut.forEach(a),ot=p(e),k(Z.$$.fragment,e),nt=p(e),b=n(e,"P",{});var we=s(b);ua=i(we,`In case one wants to do table verification (i.e. the task of determining whether a given sentence is supported or refuted by the contents of a table), one can instantiate a `),ge=n(we,"A",{href:!0});var To=s(ge);ma=i(To,"BartForSequenceClassification"),To.forEach(a),fa=i(we,` model. TAPEX has checkpoints on the hub fine-tuned on TabFact, an important benchmark for table fact checking (it achieves 84% accuracy). 
The code example below again leverages the `),_e=n(we,"A",{href:!0});var wo=s(_e);ga=i(wo,"Auto API"),wo.forEach(a),_a=i(we,"."),we.forEach(a),st=p(e),k(K.$$.fragment,e),rt=p(e),$=n(e,"H2",{class:!0});var mt=s($);I=n(mt,"A",{id:!0,class:!0,href:!0});var yo=s(I);Ne=n(yo,"SPAN",{});var xo=s(Ne);k(Y.$$.fragment,xo),xo.forEach(a),yo.forEach(a),ba=p(mt),Ie=n(mt,"SPAN",{});var qo=s(Ie);va=i(qo,"TapexTokenizer"),qo.forEach(a),mt.forEach(a),it=p(e),u=n(e,"DIV",{class:!0});var m=s(u);k(ee.$$.fragment,m),ka=p(m),Xe=n(m,"P",{});var Ao=s(Xe);Ta=i(Ao,"Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE)."),Ao.forEach(a),wa=p(m),Be=n(m,"P",{});var zo=s(Be);ya=i(zo,`This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:`),zo.forEach(a),xa=p(m),De=n(m,"P",{});var Eo=s(De);qa=i(Eo,"sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : \u2026"),Eo.forEach(a),Aa=p(m),Qe=n(m,"P",{});var jo=s(Qe);za=i(jo,`The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to the tokenizer for instance to prepare them for the model.`),jo.forEach(a),Ea=p(m),We=n(m,"P",{});var Po=s(We);ja=i(Po,"Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2."),Po.forEach(a),Pa=p(m),te=n(m,"P",{});var ft=s(te);$a=i(ft,"This tokenizer inherits from "),be=n(ft,"A",{href:!0});var $o=s(be);Sa=i($o,"PreTrainedTokenizer"),$o.forEach(a),La=i(ft,` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods.`),ft.forEach(a),Fa=p(m),X=n(m,"DIV",{class:!0});var gt=s(X);k(ae.$$.fragment,gt),Ca=p(gt),Re=n(gt,"P",{});var So=s(Re);Na=i(So,"Main method to tokenize and prepare for the model one or several table-sequence pair(s)."),So.forEach(a),gt.forEach(a),Ia=p(m),ve=n(m,"DIV",{class:!0});var Lo=s(ve);k(oe.$$.fragment,Lo),Lo.forEach(a),m.forEach(a),this.h()},h(){c(q,"name","hf:doc:metadata"),c(q,"content",JSON.stringify(Qo)),c(S,"id","tapex"),c(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(S,"href","#tapex"),c(A,"class","relative group"),c(L,"id","overview"),c(L,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(L,"href","#overview"),c(z,"class","relative group"),c(M,"href","https://arxiv.org/abs/2107.07653"),c(M,"rel","nofollow"),c(U,"href","https://www.microsoft.com/en-us/download/details.aspx?id=54253"),c(U,"rel","nofollow"),c(G,"href","https://github.com/ppasupat/WikiTableQuestions"),c(G,"rel","nofollow"),c(O,"href","https://github.com/salesforce/WikiSQL"),c(O,"rel","nofollow"),c(H,"href","https://tabfact.github.io/"),c(H,"rel","nofollow"),c(C,"id","usage-inference"),c(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(C,"href","#usage-inference"),c(P,"class","relative group"),c(he,"href","auto"),c(ue,"href","/docs/transformers/pr_19429/en/model_doc/tapex#transformers.TapexTokenizer"),c(me,"href","/docs/transformers/pr_19429/en/model_doc/bart#transformers.BartForConditionalGeneration"),c(fe,"href","/docs/transformers/pr_19429/en/model_doc/tapex#transformers.TapexTokenizer"),c(ge,"href","/docs/transformers/pr_19429/en/model_doc/bart#transformers.BartForSequenceClassification"),c(_e,"href","auto"),c(I,"id","transformers.TapexTokenizer"),c(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(I,"href","#transformers.TapexTokenizer"),c($,"class","relative group"),c(be,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(u,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,l){t(document.head,q),h(e,Me,l),h(e,A,l),t(A,S),t(S,ye),T(W,ye,null),t(A,bt),t(A,xe),t(xe,vt),h(e,Ue,l),h(e,z,l),t(z,L),t(L,qe),T(R,qe,null),t(z,kt),t(z,Ae),t(Ae,Tt),h(e,Ge,l),h(e,F,l),t(F,wt),t(F,M),t(M,yt),t(F,xt),h(e,Oe,l),h(e,ne,l),t(ne,qt),h(e,He,l),h(e,f,l),t(f,se),t(se,U),t(U,At),t(se,zt),t(f,Et),t(f,re),t(re,G),t(G,jt),t(re,Pt),t(f,$t),t(f,ie),t(ie,O),t(O,St),t(ie,Lt),t(f,Ft),t(f,le),t(le,H),t(H,Ct),t(le,Nt),h(e,Ve,l),h(e,ce,l),t(ce,It),h(e,Je,l),h(e,de,l),t(de,ze),t(ze,Xt),h(e,Ze,l),h(e,pe,l),t(pe,Bt),h(e,Ke,l),h(e,g,l),t(g,Ee),t(Ee,Dt),t(g,Qt),t(g,je),t(je,Wt),t(g,Rt),t(g,E),t(E,Mt),t(E,Pe),t(Pe,Ut),t(E,Gt),t(E,$e),t($e,Ot),t(E,Ht),t(g,Vt),t(g,j),t(j,Jt),t(j,Se),t(Se,Zt),t(j,Kt),t(j,Le),t(Le,Yt),t(j,ea),h(e,Ye,l),h(e,P,l),t(P,C),t(C,Fe),T(V,Fe,null),t(P,ta),t(P,Ce),t(Ce,aa),h(e,et,l),h(e,_,l),t(_,oa),t(_,he),t(he,na),t(_,sa),t(_,ue),t(ue,ra),t(_,ia),t(_,me),t(me,la),t(_,ca),h(e,tt,l),T(J,e,l),h(e,at,l),h(e,N,l),t(N,da),t(N,fe),t(fe,pa),t(N,ha),h(e,ot,l),T(Z,e,l),h(e,nt,l),h(e,b,l),t(b,ua),t(b,ge),t(ge,ma),t(b,fa),t(b,_e),t(_e,ga),t(b,_a),h(e,st,l),T(K,e,l),h(e,rt,l),h(e,$,l),t($,I),t(I,Ne),T(Y,Ne,null),t($,ba),t($,Ie),t(Ie,va),h(e,it,l),h(e,u,l),T(ee,u,null),t(u,ka),t(u,Xe),t(Xe,Ta),t(u,wa),t(u,Be),t(Be,ya),t(u,xa),t(u,De),t(De,qa),t(u,Aa),t(u,Qe),t(Qe,za),t(u,Ea),t(u,We),t(We,ja),t(u,Pa),t(u,te),t(te,$a),t(te,be),t(be,Sa),t(te,La),t(u,Fa),t(u,X),T(ae,X,null),t(X,Ca),t(X,Re),t(Re,Na),t(u,Ia),t(u,ve),T(oe,ve,null),lt=!0},p:Xo,i(e){lt||(w(W.$$.fragment,e),w(R.$$.fragment,e),w(V.$$.fragment,e),w(J.$$.fragment,e),w(Z.$$.fragment,e),w(K.$$.fragment,e),w(Y.$$.fragment,e),w(ee.$$.fragment,e),w(ae.$$.fragment,e),w(oe.$$.fragment,e),lt=!0)},o(e){y(W.$$.fragment,e),y(R.$$.fragment,e),y(V.$$.fragment,e),y(J.$$.fragment,e),y(Z.$$.fragment,e),y(K.$$.fragment,e),y(Y.$$.fragment,e),y(ee.$$.fragment,e),y(ae.$$.fragment,e),y(oe.$$.fragment,e),lt=!1},d(e){a(q),e&&a(Me),e&&a(A),x(W),e&&a(Ue),e&&a(z),x(R),e&&a(Ge),e&&a(F),e&&a(Oe),e&&a(ne),e&&a(He),e&&a(f),e&&a(Ve),e&&a(ce),e&&a(Je),e&&a(de),e&&a(Ze),e&&a(pe),e&&a(Ke),e&&a(g),e&&a(Ye),e&&a(P),x(V),e&&a(et),e&&a(_),e&&a(tt),x(J,e),e&&a(at),e&&a(N),e&&a(ot),x(Z,e),e&&a(nt),e&&a(b),e&&a(st),x(K,e),e&&a(rt),e&&a($),x(Y),e&&a(it),e&&a(u),x(ee),x(ae),x(oe)}}}const Qo={local:"tapex",sections:[{local:"overview",title:"Overview"},{local:"usage-inference",title:"Usage: inference"},{local:"transformers.TapexTokenizer",title:"TapexTokenizer"}],title:"TAPEX"};function Wo(Ma){return Bo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Oo extends Fo{constructor(q){super();Co(this,q,Wo,Do,No,{})}}export{Oo as default,Qo as metadata};
9
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/output.mdx-hf-doc-builder.js
import{S as U$,i as Y$,s as G$,e as n,k as d,w as h,t as p,M as J$,c as s,d as t,m as i,a,x as f,h as c,b as r,G as o,g as l,y as m,q as _,o as g,B as v,v as K$}from"../../chunks/vendor-hf-doc-builder.js";import{T as Z$}from"../../chunks/Tip-hf-doc-builder.js";import{D as y}from"../../chunks/Docstring-hf-doc-builder.js";import{C as X$}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as T}from"../../chunks/IconCopyLink-hf-doc-builder.js";function eO(xl){let x,Yt,$,A,J,O,wn,K;return{c(){x=n("p"),Yt=p("You can\u2019t unpack a "),$=n("code"),A=p("ModelOutput"),J=p(" directly. Use the "),O=n("a"),wn=p("to_tuple()"),K=p(` method to convert it to a tuple before.`),this.h()},l(Z){x=s(Z,"P",{});var M=a(x);Yt=c(M,"You can\u2019t unpack a "),$=s(M,"CODE",{});var C=a($);A=c(C,"ModelOutput"),C.forEach(t),J=c(M," directly. Use the "),O=s(M,"A",{href:!0});var er=a(O);wn=c(er,"to_tuple()"),er.forEach(t),K=c(M,` method to convert it to a tuple before.`),M.forEach(t),this.h()},h(){r(O,"href","/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput.to_tuple")},m(Z,M){l(Z,x,M),o(x,Yt),o(x,$),o($,A),o(x,J),o(x,O),o(O,wn),o(x,K)},d(Z){Z&&t(x)}}}function tO(xl){let x,Yt,$,A,J,O,wn,K,Z,M,C,er,tr,am,rm,$l,or,dm,Ol,xn,ql,b,im,wr,um,lm,nr,pm,cm,xr,hm,fm,$r,mm,_m,Or,gm,vm,qr,ym,Tm,Fr,bm,wm,Sr,xm,$m,Mr,Om,qm,kr,Fm,Sm,Ar,Mm,km,Cr,Am,Cm,Fl,q,Em,Er,Nm,zm,Nr,Pm,Bm,zr,Lm,Wm,Pr,jm,Dm,Sl,F,Hm,Br,Im,Vm,Lr,Qm,Rm,Wr,Xm,Um,jr,Ym,Gm,Ml,$n,kl,Gt,Jm,Dr,Km,Zm,Al,S,e_,Hr,t_,o_,Ir,n_,s_,Vr,a_,r_,Qr,d_,i_,Cl,sr,u_,El,ee,Jt,Rr,On,l_,Xr,p_,Nl,k,qn,c_,te,h_,Ur,f_,m_,Yr,__,g_,v_,Kt,y_,Zt,Fn,T_,Sn,b_,Gr,w_,x_,zl,oe,eo,Jr,Mn,$_,Kr,O_,Pl,ne,kn,q_,Zr,F_,Bl,se,to,ed,An,S_,td,M_,Ll,ae,Cn,k_,od,A_,Wl,re,oo,nd,En,C_,sd,E_,jl,de,Nn,N_,ad,z_,Dl,ie,no,rd,zn,P_,dd,B_,Hl,ue,Pn,L_,id,W_,Il,le,so,ud,Bn,j_,ld,D_,Vl,pe,Ln,H_,pd,I_,Ql,ce,ao,cd,Wn,V_,hd,Q_,Rl,he,jn,R_,fd,X_,Xl,fe,ro,md,Dn,U_,_d,Y_,Ul,me,Hn,G_,gd,J_,Yl,_e,io,vd,In,K_,yd,Z_,Gl,ge,Vn,eg,Td,tg,Jl,ve,uo,bd,Qn,og,wd,ng,Kl,ye,Rn,sg,xd,ag,Zl,Te,lo,$d,Xn,rg,Od,dg,ep,be,Un,ig,qd,ug,tp,we,po,Fd,Yn,lg,Sd,pg,op,xe,Gn,cg,Md,hg,np,$e,co,kd,Jn,fg,Ad,mg,sp,Oe,Kn,_g,Cd,gg,ap,qe,ho,Ed,Zn,vg,Nd,yg,rp,Fe,es,Tg,zd,bg,dp,Se,fo,Pd,ts,wg,Bd,xg,ip,Me,os,$g,Ld,Og,up,ke,mo,Wd,ns,qg,jd,Fg,lp,Ae,ss,Sg,Dd,Mg,pp,Ce,_o,Hd,as,kg,Id,Ag,cp,Ee,rs,Cg,Vd,Eg,hp,Ne,go,Qd,ds,Ng,Rd,zg,fp,ze,is,Pg,Xd,Bg,mp,Pe,vo,Ud,us,Lg,Yd,Wg,_p,Be,ls,jg,Gd,Dg,gp,Le,yo,Jd,ps,Hg,Kd,Ig,vp,We,cs,Vg,Zd,Qg,yp,je,To,ei,hs,Rg,ti,Xg,Tp,De,fs,Ug,oi,Yg,bp,He,bo,ni,ms,Gg,si,Jg,wp,Ie,_s,Kg,ai,Zg,xp,Ve,wo,ri,gs,ev,di,tv,$p,Qe,vs,ov,ii,nv,Op,Re,xo,ui,ys,sv,li,av,qp,Xe,Ts,rv,pi,dv,Fp,Ue,$o,ci,bs,iv,hi,uv,Sp,Ye,ws,lv,fi,pv,Mp,Ge,Oo,mi,xs,cv,_i,hv,kp,Je,$s,fv,Os,mv,ar,_v,gv,Ap,Ke,qo,gi,qs,vv,vi,yv,Cp,Ze,Fs,Tv,yi,bv,Ep,et,Fo,Ti,Ss,wv,bi,xv,Np,tt,Ms,$v,wi,Ov,zp,ot,So,xi,ks,qv,$i,Fv,Pp,nt,As,Sv,Oi,Mv,Bp,st,Mo,qi,Cs,kv,Fi,Av,Lp,at,Es,Cv,Si,Ev,Wp,rt,ko,Mi,Ns,Nv,ki,zv,jp,dt,zs,Pv,Ai,Bv,Dp,it,Ao,Ci,Ps,Lv,Ei,Wv,Hp,ut,Bs,jv,Ni,Dv,Ip,lt,Co,zi,Ls,Hv,Pi,Iv,Vp,pt,Ws,Vv,Bi,Qv,Qp,ct,Eo,Li,js,Rv,Wi,Xv,Rp,ht,Ds,Uv,ji,Yv,Xp,ft,No,Di,Hs,Gv,Hi,Jv,Up,mt,Is,Kv,Ii,Zv,Yp,_t,zo,Vi,Vs,ey,Qi,ty,Gp,gt,Qs,oy,Ri,ny,Jp,vt,Po,Xi,Rs,sy,Ui,ay,Kp,yt,Xs,ry,Yi,dy,Zp,Tt,Bo,Gi,Us,iy,Ji,uy,ec,bt,Ys,ly,Ki,py,tc,wt,Lo,Zi,Gs,cy,eu,hy,oc,xt,Js,fy,tu,my,nc,$t,Wo,ou,Ks,_y,nu,gy,sc,Ot,Zs,vy,su,yy,ac,qt,jo,au,ea,Ty,ru,by,rc,Ft,ta,wy,du,xy,dc,St,Do,iu,oa,$y,uu,Oy,ic,Mt,na,qy,lu,Fy,uc,kt,Ho,pu,sa,Sy,cu,My,lc,At,aa,ky,hu,Ay,pc,Ct,Io,fu,ra,Cy,mu,Ey,cc,Et,da,Ny,_u,zy,hc,Nt,Vo,gu,ia,Py,vu,By,fc,E,ua,Ly,yu,Wy,jy,Qo,la,Dy,Tu,Hy,mc,zt,Ro,bu,pa,Iy,wu,Vy,_c,N,ca,Qy,xu,Ry,Xy,Xo,ha,Uy,$u,Yy,gc,P
t,Uo,Ou,fa,Gy,qu,Jy,vc,z,ma,Ky,Fu,Zy,eT,Yo,_a,tT,Su,oT,yc,Bt,Go,Mu,ga,nT,ku,sT,Tc,P,va,aT,Au,rT,dT,Jo,ya,iT,Cu,uT,bc,Lt,Ko,Eu,Ta,lT,Nu,pT,wc,B,ba,cT,zu,hT,fT,Zo,wa,mT,Pu,_T,xc,Wt,en,Bu,xa,gT,Lu,vT,$c,L,$a,yT,Wu,TT,bT,tn,Oa,wT,ju,xT,Oc,jt,on,Du,qa,$T,Hu,OT,qc,W,Fa,qT,Iu,FT,ST,nn,Sa,MT,Vu,kT,Fc,Dt,sn,Qu,Ma,AT,Ru,CT,Sc,j,ka,ET,Xu,NT,zT,an,Aa,PT,Uu,BT,Mc,Ht,rn,Yu,Ca,LT,Gu,WT,kc,D,Ea,jT,Ju,DT,HT,dn,Na,IT,Ku,VT,Ac,It,un,Zu,za,QT,el,RT,Cc,H,Pa,XT,tl,UT,YT,ln,Ba,GT,ol,JT,Ec,Vt,pn,nl,La,KT,sl,ZT,Nc,I,Wa,e2,al,t2,o2,cn,ja,n2,rl,s2,zc,Qt,hn,dl,Da,a2,il,r2,Pc,V,Ha,d2,ul,i2,u2,fn,Ia,l2,ll,p2,Bc,Rt,mn,pl,Va,c2,cl,h2,Lc,Q,Qa,f2,hl,m2,_2,_n,Ra,g2,fl,v2,Wc,Xt,gn,ml,Xa,y2,_l,T2,jc,R,Ua,b2,gl,w2,x2,vn,Ya,$2,vl,O2,Dc,Ut,yn,yl,Ga,q2,Tl,F2,Hc,X,Ja,S2,bl,M2,k2,Tn,Ka,A2,wl,C2,Ic;return O=new T({}),xn=new X$({props:{code:`from transformers import BertTokenizer, BertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") model = BertForSequenceClassification.from_pretrained("bert-base-uncased") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForSequenceClassification <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> outputs = model(**inputs, labels=labels)`}}),$n=new X$({props:{code:"outputs[:2]",highlighted:'outputs[:<span class="hljs-number">2</span>]'}}),On=new T({}),qn=new y({props:{name:"class transformers.utils.ModelOutput",anchor:"transformers.utils.ModelOutput",parameters:"",source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L148"}}),Kt=new Z$({props:{warning:!0,$$slots:{default:[eO]},$$scope:{ctx:xl}}}),Fn=new y({props:{name:"to_tuple",anchor:"transformers.utils.ModelOutput.to_tuple",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L237"}}),Mn=new T({}),kn=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutput",anchor:"transformers.modeling_outputs.BaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L24"}}),An=new T({}),Cn=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutputWithPooling",anchor:"transformers.modeling_outputs.BaseModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"pooler_output",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L69"}}),En=new T({}),Nn=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutputWithCrossAttentions",anchor:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to 
compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L162"}}),zn=new T({}),Pn=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions",anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"pooler_output",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.pooler_output",description:`<strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L195"}}),Bn=new T({}),Ln=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutputWithPast",anchor:"transformers.modeling_outputs.BaseModelOutputWithPast",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"past_key_values",val:": 
typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutputWithPast.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L123"}}),Wn=new T({}),jn=new y({props:{name:"class transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions",anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s 
cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L244"}}),Dn=new T({}),Hn=new y({props:{name:"class transformers.modeling_outputs.Seq2SeqModelOutput",anchor:"transformers.modeling_outputs.Seq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of 
shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L290"}}),In=new T({}),Vn=new y({props:{name:"class transformers.modeling_outputs.CausalLMOutput",anchor:"transformers.modeling_outputs.CausalLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.CausalLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_outputs.CausalLMOutput.logits",description:`<strong>logits</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.CausalLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.CausalLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L351"}}),Qn=new T({}),Rn=new y({props:{name:"class transformers.modeling_outputs.CausalLMOutputWithCrossAttentions",anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding 
outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L416"}}),Xn=new T({}),Un=new y({props:{name:"class transformers.modeling_outputs.CausalLMOutputWithPast",anchor:"transformers.modeling_outputs.CausalLMOutputWithPast",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.CausalLMOutputWithPast.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithPast.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.CausalLMOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention 
heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L380"}}),Yn=new T({}),Gn=new y({props:{name:"class transformers.modeling_outputs.MaskedLMOutput",anchor:"transformers.modeling_outputs.MaskedLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.MaskedLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.MaskedLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.MaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.MaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L496"}}),Jn=new T({}),Kn=new y({props:{name:"class transformers.modeling_outputs.Seq2SeqLMOutput",anchor:"transformers.modeling_outputs.Seq2SeqLMOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L525"}}),Zn=new T({}),es=new y({props:{name:"class transformers.modeling_outputs.NextSentencePredictorOutput",anchor:"transformers.modeling_outputs.NextSentencePredictorOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.NextSentencePredictorOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) &#x2014; Next sequence prediction (classification) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.NextSentencePredictorOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.NextSentencePredictorOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.NextSentencePredictorOutput.attentions",description:`<strong>attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L585"}}),ts=new T({}),os=new y({props:{name:"class transformers.modeling_outputs.SequenceClassifierOutput",anchor:"transformers.modeling_outputs.SequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.SequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.SequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.SequenceClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.SequenceClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L615"}}),ns=new T({}),ss=new y({props:{name:"class transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput",anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) 
&#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L644"}}),as=new T({}),rs=new y({props:{name:"class transformers.modeling_outputs.MultipleChoiceModelOutput",anchor:"transformers.modeling_outputs.MultipleChoiceModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.MultipleChoiceModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.MultipleChoiceModelOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.MultipleChoiceModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.MultipleChoiceModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L704"}}),ds=new T({}),is=new y({props:{name:"class transformers.modeling_outputs.TokenClassifierOutput",anchor:"transformers.modeling_outputs.TokenClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.TokenClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.TokenClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.TokenClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.TokenClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L735"}}),us=new T({}),ls=new y({props:{name:"class transformers.modeling_outputs.QuestionAnsweringModelOutput",anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_logits",val:": FloatTensor = None"},{name:"end_logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.QuestionAnsweringModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L764"}}),ps=new T({}),cs=new y({props:{name:"class transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput",anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_logits",val:": FloatTensor = None"},{name:"end_logits",val:": FloatTensor = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = 
None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[torch.FloatTensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the 
decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L796"}}),hs=new T({}),fs=new y({props:{name:"class transformers.modeling_outputs.SemanticSegmenterOutput",anchor:"transformers.modeling_outputs.SemanticSegmenterOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.SemanticSegmenterOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.SemanticSegmenterOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of 
shape <code>(batch_size, config.num_labels, logits_height, logits_width)</code>) &#x2014; Classification scores for each pixel.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>The logits returned do not necessarily have the same size as the <code>pixel_values</code> passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed.</p> </div>`,name:"logits"},{anchor:"transformers.modeling_outputs.SemanticSegmenterOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, patch_size, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.SemanticSegmenterOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L859"}}),ms=new T({}),_s=new y({props:{name:"class transformers.modeling_outputs.ImageClassifierOutput",anchor:"transformers.modeling_outputs.ImageClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.ImageClassifierOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.ImageClassifierOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.ImageClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the 
model has an embedding layer, + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states (also called feature maps) of the model at the output of each stage.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.ImageClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L897"}}),gs=new T({}),vs=new y({props:{name:"class transformers.modeling_outputs.ImageClassifierOutputWithNoAttention",anchor:"transformers.modeling_outputs.ImageClassifierOutputWithNoAttention",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. 
Hidden-states (also called feature maps) of the model at the output of each stage.`,name:"hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L925"}}),ys=new T({}),Ts=new y({props:{name:"class transformers.modeling_outputs.DepthEstimatorOutput",anchor:"transformers.modeling_outputs.DepthEstimatorOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"predicted_depth",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.DepthEstimatorOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.DepthEstimatorOutput.predicted_depth",description:`<strong>predicted_depth</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, height, width)</code>) &#x2014; Predicted depth for each pixel.`,name:"predicted_depth"},{anchor:"transformers.modeling_outputs.DepthEstimatorOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, num_channels, height, width)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.DepthEstimatorOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L946"}}),bs=new T({}),ws=new y({props:{name:"class transformers.modeling_outputs.Wav2Vec2BaseModelOutput",anchor:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput",parameters:[{name:"last_hidden_state",val:": FloatTensor = None"},{name:"extract_features",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput.extract_features",description:`<strong>extract_features</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.`,name:"extract_features"},{anchor:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L976"}}),xs=new T({}),$s=new y({props:{name:"class transformers.modeling_outputs.XVectorOutput",anchor:"transformers.modeling_outputs.XVectorOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"logits",val:": FloatTensor = None"},{name:"embeddings",val:": FloatTensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_outputs.XVectorOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.modeling_outputs.XVectorOutput.logits",description:`<strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) &#x2014; Classification hidden states before AMSoftmax.`,name:"logits"},{anchor:"transformers.modeling_outputs.XVectorOutput.embeddings",description:`<strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) &#x2014; Utterance embeddings used for vector similarity-based retrieval.`,name:"embeddings"},{anchor:"transformers.modeling_outputs.XVectorOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_outputs.XVectorOutput.attentions",description:`<strong>attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L1005"}}),qs=new T({}),Fs=new y({props:{name:"class transformers.modeling_tf_outputs.TFBaseModelOutput",anchor:"transformers.modeling_tf_outputs.TFBaseModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L24"}}),Ss=new T({}),Ms=new y({props:{name:"class transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling",anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"pooler_output",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the 
model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.`,name:"pooler_output"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L69"}}),ks=new T({}),As=new y({props:{name:"class transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions",anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"pooler_output",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.pooler_output",description:`<strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.`,name:"pooler_output"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L125"}}),Cs=new T({}),Es=new y({props:{name:"class transformers.modeling_tf_outputs.TFBaseModelOutputWithPast",anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": 
typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L173"}}),Ns=new T({}),zs=new y({props:{name:"class transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions",anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of 
the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L242"}}),Ps=new T({}),Bs=new y({props:{name:"class transformers.modeling_tf_outputs.TFSeq2SeqModelOutput",anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = 
None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> 
(<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L285"}}),Ls=new T({}),Ws=new y({props:{name:"class transformers.modeling_tf_outputs.TFCausalLMOutput",anchor:"transformers.modeling_tf_outputs.TFCausalLMOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L345"}}),js=new T({}),Ds=new y({props:{name:"class transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions",anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L410"}}),Hs=new T({}),Is=new y({props:{name:"class transformers.modeling_tf_outputs.TFCausalLMOutputWithPast",anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model 
at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L374"}}),Vs=new T({}),Qs=new y({props:{name:"class transformers.modeling_tf_outputs.TFMaskedLMOutput",anchor:"transformers.modeling_tf_outputs.TFMaskedLMOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFMaskedLMOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFMaskedLMOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFMaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFMaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L453"}}),Rs=new T({}),Xs=new y({props:{name:"class transformers.modeling_tf_outputs.TFSeq2SeqLMOutput",anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": 
typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L482"}}),Us=new T({}),Ys=new y({props:{name:"class transformers.modeling_tf_outputs.TFNextSentencePredictorOutput",anchor:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>next_sentence_label</code> is provided) &#x2014; Next sentence prediction loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> 
is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L541"}}),Gs=new T({}),Js=new y({props:{name:"class transformers.modeling_tf_outputs.TFSequenceClassifierOutput",anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L571"}}),Ks=new T({}),Zs=new y({props:{name:"class 
transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput",anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention 
heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L600"}}),ea=new T({}),ta=new y({props:{name:"class transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput",anchor:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L747"}}),oa=new T({}),na=new y({props:{name:"class transformers.modeling_tf_outputs.TFTokenClassifierOutput",anchor:"transformers.modeling_tf_outputs.TFTokenClassifierOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFTokenClassifierOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) &#x2014; Classification loss.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFTokenClassifierOutput.logits",description:`<strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_tf_outputs.TFTokenClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFTokenClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after 
the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L778"}}),sa=new T({}),aa=new y({props:{name:"class transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput",anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"start_logits",val:": Tensor = None"},{name:"end_logits",val:": Tensor = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L807"}}),ra=new T({}),da=new y({props:{name:"class transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput",anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput",parameters:[{name:"loss",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"start_logits",val:": Tensor = None"},{name:"end_logits",val:": Tensor = None"},{name:"past_key_values",val:": typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = 
None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[tensorflow.python.framework.ops.Tensor] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None"}],parametersDescription:[{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.loss",description:`<strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.`,name:"loss"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention 
heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L839"}}),ia=new T({}),ua=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxBaseModelOutput",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutput",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L23"}}),la=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),pa=new T({}),ca=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"past_key_values",val:": typing.Union[typing.Dict[str, jax._src.numpy.ndarray.ndarray], NoneType] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.past_key_values",description:`<strong>past_key_values</strong> (<code>Dict[str, jnp.ndarray]</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L49"}}),ha=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),fa=new T({}),ma=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"pooler_output",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.`,name:"last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.pooler_output",description:`<strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.`,name:"pooler_output"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L79"}}),_a=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),ga=new T({}),va=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, 
num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L159"}}),ya=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Ta=new T({}),ba=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput",parameters:[{name:"last_hidden_state",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = 
None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.last_hidden_state",description:`<strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.`,name:"last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape 
<code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L205"}}),wa=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),xa=new T({}),$a=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions",anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions",parameters:[{name:"logits",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding 
outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L266"}}),Oa=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),qa=new T({}),Fa=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxMaskedLMOutput",anchor:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the 
output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L307"}}),Sa=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Ma=new T({}),ka=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L336"}}),Aa=new 
y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Ca=new T({}),Ea=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput",anchor:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L393"}}),Na=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),za=new T({}),Pa=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput",anchor:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L420"}}),Ba=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),La=new T({}),Wa=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential 
decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted 
average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L446"}}),ja=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Da=new T({}),Ha=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput",anchor:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L503"}}),Ia=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Va=new T({}),Qa=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxTokenClassifierOutput",anchor:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput",parameters:[{name:"logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.logits",description:`<strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before 
SoftMax).`,name:"logits"},{anchor:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L531"}}),Ra=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Xa=new T({}),Ua=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput",anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput",parameters:[{name:"start_logits",val:": ndarray = None"},{name:"end_logits",val:": ndarray = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.`,name:"hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of 
<code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L557"}}),Ya=new y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),Ga=new T({}),Ja=new y({props:{name:"class transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput",parameters:[{name:"start_logits",val:": ndarray = None"},{name:"end_logits",val:": ndarray = None"},{name:"past_key_values",val:": typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_last_hidden_state",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None"}],parametersDescription:[{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.start_logits",description:`<strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).`,name:"start_logits"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.end_logits",description:`<strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).`,name:"end_logits"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.past_key_values",description:`<strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.`,name:"past_key_values"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + 
one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.`,name:"decoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"decoder_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.`,name:"cross_attentions"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state",description:`<strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.`,name:"encoder_last_hidden_state"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.`,name:"encoder_hidden_states"},{anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.`,name:"encoder_attentions"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L586"}}),Ka=new 
y({props:{name:"replace",anchor:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),{c(){x=n("meta"),Yt=d(),$=n("h1"),A=n("a"),J=n("span"),h(O.$$.fragment),wn=d(),K=n("span"),Z=p("Model outputs"),M=d(),C=n("p"),er=p("All models have outputs that are instances of subclasses of "),tr=n("a"),am=p("ModelOutput"),rm=p(`. Those are data structures containing all the information returned by the model, but that can also be used as tuples or dictionaries.`),$l=d(),or=n("p"),dm=p("Let\u2019s see how this looks in an example:"),Ol=d(),h(xn.$$.fragment),ql=d(),b=n("p"),im=p("The "),wr=n("code"),um=p("outputs"),lm=p(" object is a "),nr=n("a"),pm=p("SequenceClassifierOutput"),cm=p(`, as we can see in the documentation of that class below, it means it has an optional `),xr=n("code"),hm=p("loss"),fm=p(", a "),$r=n("code"),mm=p("logits"),_m=p(" an optional "),Or=n("code"),gm=p("hidden_states"),vm=p(` and an optional `),qr=n("code"),ym=p("attentions"),Tm=p(" attribute. Here we have the "),Fr=n("code"),bm=p("loss"),wm=p(" since we passed along "),Sr=n("code"),xm=p("labels"),$m=p(`, but we don\u2019t have `),Mr=n("code"),Om=p("hidden_states"),qm=p(" and "),kr=n("code"),Fm=p("attentions"),Sm=p(" because we didn\u2019t pass "),Ar=n("code"),Mm=p("output_hidden_states=True"),km=p(` or `),Cr=n("code"),Am=p("output_attentions=True"),Cm=p("."),Fl=d(),q=n("p"),Em=p(`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),Er=n("code"),Nm=p("None"),zm=p(". Here for instance "),Nr=n("code"),Pm=p("outputs.loss"),Bm=p(" is the loss computed by the model, and "),zr=n("code"),Lm=p("outputs.attentions"),Wm=p(` is `),Pr=n("code"),jm=p("None"),Dm=p("."),Sl=d(),F=n("p"),Hm=p("When considering our "),Br=n("code"),Im=p("outputs"),Vm=p(" object as tuple, it only considers the attributes that don\u2019t have "),Lr=n("code"),Qm=p("None"),Rm=p(` values. Here for instance, it has two elements, `),Wr=n("code"),Xm=p("loss"),Um=p(" then "),jr=n("code"),Ym=p("logits"),Gm=p(", so"),Ml=d(),h($n.$$.fragment),kl=d(),Gt=n("p"),Jm=p("will return the tuple "),Dr=n("code"),Km=p("(outputs.loss, outputs.logits)"),Zm=p(" for instance."),Al=d(),S=n("p"),e_=p("When considering our "),Hr=n("code"),t_=p("outputs"),o_=p(" object as dictionary, it only considers the attributes that don\u2019t have "),Ir=n("code"),n_=p("None"),s_=p(` values. Here for instance, it has two keys that are `),Vr=n("code"),a_=p("loss"),r_=p(" and "),Qr=n("code"),d_=p("logits"),i_=p("."),Cl=d(),sr=n("p"),u_=p(`We document here the generic model outputs that are used by more than one model type. Specific output types are documented on their corresponding model page.`),El=d(),ee=n("h2"),Jt=n("a"),Rr=n("span"),h(On.$$.fragment),l_=d(),Xr=n("span"),p_=p("ModelOutput"),Nl=d(),k=n("div"),h(qn.$$.fragment),c_=d(),te=n("p"),h_=p("Base class for all model outputs as dataclass. Has a "),Ur=n("code"),f_=p("__getitem__"),m_=p(` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `),Yr=n("code"),__=p("None"),g_=p(` attributes. 
Otherwise behaves like a regular python dictionary.`),v_=d(),h(Kt.$$.fragment),y_=d(),Zt=n("div"),h(Fn.$$.fragment),T_=d(),Sn=n("p"),b_=p("Convert self to a tuple containing all the attributes/keys that are not "),Gr=n("code"),w_=p("None"),x_=p("."),zl=d(),oe=n("h2"),eo=n("a"),Jr=n("span"),h(Mn.$$.fragment),$_=d(),Kr=n("span"),O_=p("BaseModelOutput"),Pl=d(),ne=n("div"),h(kn.$$.fragment),q_=d(),Zr=n("p"),F_=p("Base class for model\u2019s outputs, with potential hidden states and attentions."),Bl=d(),se=n("h2"),to=n("a"),ed=n("span"),h(An.$$.fragment),S_=d(),td=n("span"),M_=p("BaseModelOutputWithPooling"),Ll=d(),ae=n("div"),h(Cn.$$.fragment),k_=d(),od=n("p"),A_=p("Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),Wl=d(),re=n("h2"),oo=n("a"),nd=n("span"),h(En.$$.fragment),C_=d(),sd=n("span"),E_=p("BaseModelOutputWithCrossAttentions"),jl=d(),de=n("div"),h(Nn.$$.fragment),N_=d(),ad=n("p"),z_=p("Base class for model\u2019s outputs, with potential hidden states and attentions."),Dl=d(),ie=n("h2"),no=n("a"),rd=n("span"),h(zn.$$.fragment),P_=d(),dd=n("span"),B_=p("BaseModelOutputWithPoolingAndCrossAttentions"),Hl=d(),ue=n("div"),h(Pn.$$.fragment),L_=d(),id=n("p"),W_=p("Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),Il=d(),le=n("h2"),so=n("a"),ud=n("span"),h(Bn.$$.fragment),j_=d(),ld=n("span"),D_=p("BaseModelOutputWithPast"),Vl=d(),pe=n("div"),h(Ln.$$.fragment),H_=d(),pd=n("p"),I_=p("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Ql=d(),ce=n("h2"),ao=n("a"),cd=n("span"),h(Wn.$$.fragment),V_=d(),hd=n("span"),Q_=p("BaseModelOutputWithPastAndCrossAttentions"),Rl=d(),he=n("div"),h(jn.$$.fragment),R_=d(),fd=n("p"),X_=p("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Xl=d(),fe=n("h2"),ro=n("a"),md=n("span"),h(Dn.$$.fragment),U_=d(),_d=n("span"),Y_=p("Seq2SeqModelOutput"),Ul=d(),me=n("div"),h(Hn.$$.fragment),G_=d(),gd=n("p"),J_=p(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Yl=d(),_e=n("h2"),io=n("a"),vd=n("span"),h(In.$$.fragment),K_=d(),yd=n("span"),Z_=p("CausalLMOutput"),Gl=d(),ge=n("div"),h(Vn.$$.fragment),eg=d(),Td=n("p"),tg=p("Base class for causal language model (or autoregressive) outputs."),Jl=d(),ve=n("h2"),uo=n("a"),bd=n("span"),h(Qn.$$.fragment),og=d(),wd=n("span"),ng=p("CausalLMOutputWithCrossAttentions"),Kl=d(),ye=n("div"),h(Rn.$$.fragment),sg=d(),xd=n("p"),ag=p("Base class for causal language model (or autoregressive) outputs."),Zl=d(),Te=n("h2"),lo=n("a"),$d=n("span"),h(Xn.$$.fragment),rg=d(),Od=n("span"),dg=p("CausalLMOutputWithPast"),ep=d(),be=n("div"),h(Un.$$.fragment),ig=d(),qd=n("p"),ug=p("Base class for causal language model (or autoregressive) outputs."),tp=d(),we=n("h2"),po=n("a"),Fd=n("span"),h(Yn.$$.fragment),lg=d(),Sd=n("span"),pg=p("MaskedLMOutput"),op=d(),xe=n("div"),h(Gn.$$.fragment),cg=d(),Md=n("p"),hg=p("Base class for masked language models outputs."),np=d(),$e=n("h2"),co=n("a"),kd=n("span"),h(Jn.$$.fragment),fg=d(),Ad=n("span"),mg=p("Seq2SeqLMOutput"),sp=d(),Oe=n("div"),h(Kn.$$.fragment),_g=d(),Cd=n("p"),gg=p("Base class for sequence-to-sequence language models outputs."),ap=d(),qe=n("h2"),ho=n("a"),Ed=n("span"),h(Zn.$$.fragment),vg=d(),Nd=n("span"),yg=p("NextSentencePredictorOutput"),rp=d(),Fe=n("div"),h(es.$$.fragment),Tg=d(),zd=n("p"),bg=p("Base class for outputs 
of models predicting if two sentences are consecutive or not."),dp=d(),Se=n("h2"),fo=n("a"),Pd=n("span"),h(ts.$$.fragment),wg=d(),Bd=n("span"),xg=p("SequenceClassifierOutput"),ip=d(),Me=n("div"),h(os.$$.fragment),$g=d(),Ld=n("p"),Og=p("Base class for outputs of sentence classification models."),up=d(),ke=n("h2"),mo=n("a"),Wd=n("span"),h(ns.$$.fragment),qg=d(),jd=n("span"),Fg=p("Seq2SeqSequenceClassifierOutput"),lp=d(),Ae=n("div"),h(ss.$$.fragment),Sg=d(),Dd=n("p"),Mg=p("Base class for outputs of sequence-to-sequence sentence classification models."),pp=d(),Ce=n("h2"),_o=n("a"),Hd=n("span"),h(as.$$.fragment),kg=d(),Id=n("span"),Ag=p("MultipleChoiceModelOutput"),cp=d(),Ee=n("div"),h(rs.$$.fragment),Cg=d(),Vd=n("p"),Eg=p("Base class for outputs of multiple choice models."),hp=d(),Ne=n("h2"),go=n("a"),Qd=n("span"),h(ds.$$.fragment),Ng=d(),Rd=n("span"),zg=p("TokenClassifierOutput"),fp=d(),ze=n("div"),h(is.$$.fragment),Pg=d(),Xd=n("p"),Bg=p("Base class for outputs of token classification models."),mp=d(),Pe=n("h2"),vo=n("a"),Ud=n("span"),h(us.$$.fragment),Lg=d(),Yd=n("span"),Wg=p("QuestionAnsweringModelOutput"),_p=d(),Be=n("div"),h(ls.$$.fragment),jg=d(),Gd=n("p"),Dg=p("Base class for outputs of question answering models."),gp=d(),Le=n("h2"),yo=n("a"),Jd=n("span"),h(ps.$$.fragment),Hg=d(),Kd=n("span"),Ig=p("Seq2SeqQuestionAnsweringModelOutput"),vp=d(),We=n("div"),h(cs.$$.fragment),Vg=d(),Zd=n("p"),Qg=p("Base class for outputs of sequence-to-sequence question answering models."),yp=d(),je=n("h2"),To=n("a"),ei=n("span"),h(hs.$$.fragment),Rg=d(),ti=n("span"),Xg=p("SemanticSegmenterOutput"),Tp=d(),De=n("div"),h(fs.$$.fragment),Ug=d(),oi=n("p"),Yg=p("Base class for outputs of semantic segmentation models."),bp=d(),He=n("h2"),bo=n("a"),ni=n("span"),h(ms.$$.fragment),Gg=d(),si=n("span"),Jg=p("ImageClassifierOutput"),wp=d(),Ie=n("div"),h(_s.$$.fragment),Kg=d(),ai=n("p"),Zg=p("Base class for outputs of image classification models."),xp=d(),Ve=n("h2"),wo=n("a"),ri=n("span"),h(gs.$$.fragment),ev=d(),di=n("span"),tv=p("ImageClassifierOutputWithNoAttention"),$p=d(),Qe=n("div"),h(vs.$$.fragment),ov=d(),ii=n("p"),nv=p("Base class for outputs of image classification models."),Op=d(),Re=n("h2"),xo=n("a"),ui=n("span"),h(ys.$$.fragment),sv=d(),li=n("span"),av=p("DepthEstimatorOutput"),qp=d(),Xe=n("div"),h(Ts.$$.fragment),rv=d(),pi=n("p"),dv=p("Base class for outputs of depth estimation models."),Fp=d(),Ue=n("h2"),$o=n("a"),ci=n("span"),h(bs.$$.fragment),iv=d(),hi=n("span"),uv=p("Wav2Vec2BaseModelOutput"),Sp=d(),Ye=n("div"),h(ws.$$.fragment),lv=d(),fi=n("p"),pv=p("Base class for models that have been trained with the Wav2Vec2 loss objective."),Mp=d(),Ge=n("h2"),Oo=n("a"),mi=n("span"),h(xs.$$.fragment),cv=d(),_i=n("span"),hv=p("XVectorOutput"),kp=d(),Je=n("div"),h($s.$$.fragment),fv=d(),Os=n("p"),mv=p("Output type of "),ar=n("a"),_v=p("Wav2Vec2ForXVector"),gv=p("."),Ap=d(),Ke=n("h2"),qo=n("a"),gi=n("span"),h(qs.$$.fragment),vv=d(),vi=n("span"),yv=p("TFBaseModelOutput"),Cp=d(),Ze=n("div"),h(Fs.$$.fragment),Tv=d(),yi=n("p"),bv=p("Base class for model\u2019s outputs, with potential hidden states and attentions."),Ep=d(),et=n("h2"),Fo=n("a"),Ti=n("span"),h(Ss.$$.fragment),wv=d(),bi=n("span"),xv=p("TFBaseModelOutputWithPooling"),Np=d(),tt=n("div"),h(Ms.$$.fragment),$v=d(),wi=n("p"),Ov=p("Base class for model\u2019s outputs that also contains a pooling of the last hidden 
states."),zp=d(),ot=n("h2"),So=n("a"),xi=n("span"),h(ks.$$.fragment),qv=d(),$i=n("span"),Fv=p("TFBaseModelOutputWithPoolingAndCrossAttentions"),Pp=d(),nt=n("div"),h(As.$$.fragment),Sv=d(),Oi=n("p"),Mv=p("Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),Bp=d(),st=n("h2"),Mo=n("a"),qi=n("span"),h(Cs.$$.fragment),kv=d(),Fi=n("span"),Av=p("TFBaseModelOutputWithPast"),Lp=d(),at=n("div"),h(Es.$$.fragment),Cv=d(),Si=n("p"),Ev=p("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Wp=d(),rt=n("h2"),ko=n("a"),Mi=n("span"),h(Ns.$$.fragment),Nv=d(),ki=n("span"),zv=p("TFBaseModelOutputWithPastAndCrossAttentions"),jp=d(),dt=n("div"),h(zs.$$.fragment),Pv=d(),Ai=n("p"),Bv=p("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Dp=d(),it=n("h2"),Ao=n("a"),Ci=n("span"),h(Ps.$$.fragment),Lv=d(),Ei=n("span"),Wv=p("TFSeq2SeqModelOutput"),Hp=d(),ut=n("div"),h(Bs.$$.fragment),jv=d(),Ni=n("p"),Dv=p(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Ip=d(),lt=n("h2"),Co=n("a"),zi=n("span"),h(Ls.$$.fragment),Hv=d(),Pi=n("span"),Iv=p("TFCausalLMOutput"),Vp=d(),pt=n("div"),h(Ws.$$.fragment),Vv=d(),Bi=n("p"),Qv=p("Base class for causal language model (or autoregressive) outputs."),Qp=d(),ct=n("h2"),Eo=n("a"),Li=n("span"),h(js.$$.fragment),Rv=d(),Wi=n("span"),Xv=p("TFCausalLMOutputWithCrossAttentions"),Rp=d(),ht=n("div"),h(Ds.$$.fragment),Uv=d(),ji=n("p"),Yv=p("Base class for causal language model (or autoregressive) outputs."),Xp=d(),ft=n("h2"),No=n("a"),Di=n("span"),h(Hs.$$.fragment),Gv=d(),Hi=n("span"),Jv=p("TFCausalLMOutputWithPast"),Up=d(),mt=n("div"),h(Is.$$.fragment),Kv=d(),Ii=n("p"),Zv=p("Base class for causal language model (or autoregressive) outputs."),Yp=d(),_t=n("h2"),zo=n("a"),Vi=n("span"),h(Vs.$$.fragment),ey=d(),Qi=n("span"),ty=p("TFMaskedLMOutput"),Gp=d(),gt=n("div"),h(Qs.$$.fragment),oy=d(),Ri=n("p"),ny=p("Base class for masked language models outputs."),Jp=d(),vt=n("h2"),Po=n("a"),Xi=n("span"),h(Rs.$$.fragment),sy=d(),Ui=n("span"),ay=p("TFSeq2SeqLMOutput"),Kp=d(),yt=n("div"),h(Xs.$$.fragment),ry=d(),Yi=n("p"),dy=p("Base class for sequence-to-sequence language models outputs."),Zp=d(),Tt=n("h2"),Bo=n("a"),Gi=n("span"),h(Us.$$.fragment),iy=d(),Ji=n("span"),uy=p("TFNextSentencePredictorOutput"),ec=d(),bt=n("div"),h(Ys.$$.fragment),ly=d(),Ki=n("p"),py=p("Base class for outputs of models predicting if two sentences are consecutive or not."),tc=d(),wt=n("h2"),Lo=n("a"),Zi=n("span"),h(Gs.$$.fragment),cy=d(),eu=n("span"),hy=p("TFSequenceClassifierOutput"),oc=d(),xt=n("div"),h(Js.$$.fragment),fy=d(),tu=n("p"),my=p("Base class for outputs of sentence classification models."),nc=d(),$t=n("h2"),Wo=n("a"),ou=n("span"),h(Ks.$$.fragment),_y=d(),nu=n("span"),gy=p("TFSeq2SeqSequenceClassifierOutput"),sc=d(),Ot=n("div"),h(Zs.$$.fragment),vy=d(),su=n("p"),yy=p("Base class for outputs of sequence-to-sequence sentence classification models."),ac=d(),qt=n("h2"),jo=n("a"),au=n("span"),h(ea.$$.fragment),Ty=d(),ru=n("span"),by=p("TFMultipleChoiceModelOutput"),rc=d(),Ft=n("div"),h(ta.$$.fragment),wy=d(),du=n("p"),xy=p("Base class for outputs of multiple choice models."),dc=d(),St=n("h2"),Do=n("a"),iu=n("span"),h(oa.$$.fragment),$y=d(),uu=n("span"),Oy=p("TFTokenClassifierOutput"),ic=d(),Mt=n("div"),h(na.$$.fragment),qy=d(),lu=n("p"),Fy=p("Base class for outputs of token 
classification models."),uc=d(),kt=n("h2"),Ho=n("a"),pu=n("span"),h(sa.$$.fragment),Sy=d(),cu=n("span"),My=p("TFQuestionAnsweringModelOutput"),lc=d(),At=n("div"),h(aa.$$.fragment),ky=d(),hu=n("p"),Ay=p("Base class for outputs of question answering models."),pc=d(),Ct=n("h2"),Io=n("a"),fu=n("span"),h(ra.$$.fragment),Cy=d(),mu=n("span"),Ey=p("TFSeq2SeqQuestionAnsweringModelOutput"),cc=d(),Et=n("div"),h(da.$$.fragment),Ny=d(),_u=n("p"),zy=p("Base class for outputs of sequence-to-sequence question answering models."),hc=d(),Nt=n("h2"),Vo=n("a"),gu=n("span"),h(ia.$$.fragment),Py=d(),vu=n("span"),By=p("FlaxBaseModelOutput"),fc=d(),E=n("div"),h(ua.$$.fragment),Ly=d(),yu=n("p"),Wy=p("Base class for model\u2019s outputs, with potential hidden states and attentions."),jy=d(),Qo=n("div"),h(la.$$.fragment),Dy=d(),Tu=n("p"),Hy=p("\u201CReturns a new object replacing the specified fields with new values."),mc=d(),zt=n("h2"),Ro=n("a"),bu=n("span"),h(pa.$$.fragment),Iy=d(),wu=n("span"),Vy=p("FlaxBaseModelOutputWithPast"),_c=d(),N=n("div"),h(ca.$$.fragment),Qy=d(),xu=n("p"),Ry=p("Base class for model\u2019s outputs, with potential hidden states and attentions."),Xy=d(),Xo=n("div"),h(ha.$$.fragment),Uy=d(),$u=n("p"),Yy=p("\u201CReturns a new object replacing the specified fields with new values."),gc=d(),Pt=n("h2"),Uo=n("a"),Ou=n("span"),h(fa.$$.fragment),Gy=d(),qu=n("span"),Jy=p("FlaxBaseModelOutputWithPooling"),vc=d(),z=n("div"),h(ma.$$.fragment),Ky=d(),Fu=n("p"),Zy=p("Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),eT=d(),Yo=n("div"),h(_a.$$.fragment),tT=d(),Su=n("p"),oT=p("\u201CReturns a new object replacing the specified fields with new values."),yc=d(),Bt=n("h2"),Go=n("a"),Mu=n("span"),h(ga.$$.fragment),nT=d(),ku=n("span"),sT=p("FlaxBaseModelOutputWithPastAndCrossAttentions"),Tc=d(),P=n("div"),h(va.$$.fragment),aT=d(),Au=n("p"),rT=p("Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),dT=d(),Jo=n("div"),h(ya.$$.fragment),iT=d(),Cu=n("p"),uT=p("\u201CReturns a new object replacing the specified fields with new values."),bc=d(),Lt=n("h2"),Ko=n("a"),Eu=n("span"),h(Ta.$$.fragment),lT=d(),Nu=n("span"),pT=p("FlaxSeq2SeqModelOutput"),wc=d(),B=n("div"),h(ba.$$.fragment),cT=d(),zu=n("p"),hT=p(`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),fT=d(),Zo=n("div"),h(wa.$$.fragment),mT=d(),Pu=n("p"),_T=p("\u201CReturns a new object replacing the specified fields with new values."),xc=d(),Wt=n("h2"),en=n("a"),Bu=n("span"),h(xa.$$.fragment),gT=d(),Lu=n("span"),vT=p("FlaxCausalLMOutputWithCrossAttentions"),$c=d(),L=n("div"),h($a.$$.fragment),yT=d(),Wu=n("p"),TT=p("Base class for causal language model (or autoregressive) outputs."),bT=d(),tn=n("div"),h(Oa.$$.fragment),wT=d(),ju=n("p"),xT=p("\u201CReturns a new object replacing the specified fields with new values."),Oc=d(),jt=n("h2"),on=n("a"),Du=n("span"),h(qa.$$.fragment),$T=d(),Hu=n("span"),OT=p("FlaxMaskedLMOutput"),qc=d(),W=n("div"),h(Fa.$$.fragment),qT=d(),Iu=n("p"),FT=p("Base class for masked language models outputs."),ST=d(),nn=n("div"),h(Sa.$$.fragment),MT=d(),Vu=n("p"),kT=p("\u201CReturns a new object replacing the specified fields with new values."),Fc=d(),Dt=n("h2"),sn=n("a"),Qu=n("span"),h(Ma.$$.fragment),AT=d(),Ru=n("span"),CT=p("FlaxSeq2SeqLMOutput"),Sc=d(),j=n("div"),h(ka.$$.fragment),ET=d(),Xu=n("p"),NT=p("Base class for sequence-to-sequence language models 
outputs."),zT=d(),an=n("div"),h(Aa.$$.fragment),PT=d(),Uu=n("p"),BT=p("\u201CReturns a new object replacing the specified fields with new values."),Mc=d(),Ht=n("h2"),rn=n("a"),Yu=n("span"),h(Ca.$$.fragment),LT=d(),Gu=n("span"),WT=p("FlaxNextSentencePredictorOutput"),kc=d(),D=n("div"),h(Ea.$$.fragment),jT=d(),Ju=n("p"),DT=p("Base class for outputs of models predicting if two sentences are consecutive or not."),HT=d(),dn=n("div"),h(Na.$$.fragment),IT=d(),Ku=n("p"),VT=p("\u201CReturns a new object replacing the specified fields with new values."),Ac=d(),It=n("h2"),un=n("a"),Zu=n("span"),h(za.$$.fragment),QT=d(),el=n("span"),RT=p("FlaxSequenceClassifierOutput"),Cc=d(),H=n("div"),h(Pa.$$.fragment),XT=d(),tl=n("p"),UT=p("Base class for outputs of sentence classification models."),YT=d(),ln=n("div"),h(Ba.$$.fragment),GT=d(),ol=n("p"),JT=p("\u201CReturns a new object replacing the specified fields with new values."),Ec=d(),Vt=n("h2"),pn=n("a"),nl=n("span"),h(La.$$.fragment),KT=d(),sl=n("span"),ZT=p("FlaxSeq2SeqSequenceClassifierOutput"),Nc=d(),I=n("div"),h(Wa.$$.fragment),e2=d(),al=n("p"),t2=p("Base class for outputs of sequence-to-sequence sentence classification models."),o2=d(),cn=n("div"),h(ja.$$.fragment),n2=d(),rl=n("p"),s2=p("\u201CReturns a new object replacing the specified fields with new values."),zc=d(),Qt=n("h2"),hn=n("a"),dl=n("span"),h(Da.$$.fragment),a2=d(),il=n("span"),r2=p("FlaxMultipleChoiceModelOutput"),Pc=d(),V=n("div"),h(Ha.$$.fragment),d2=d(),ul=n("p"),i2=p("Base class for outputs of multiple choice models."),u2=d(),fn=n("div"),h(Ia.$$.fragment),l2=d(),ll=n("p"),p2=p("\u201CReturns a new object replacing the specified fields with new values."),Bc=d(),Rt=n("h2"),mn=n("a"),pl=n("span"),h(Va.$$.fragment),c2=d(),cl=n("span"),h2=p("FlaxTokenClassifierOutput"),Lc=d(),Q=n("div"),h(Qa.$$.fragment),f2=d(),hl=n("p"),m2=p("Base class for outputs of token classification models."),_2=d(),_n=n("div"),h(Ra.$$.fragment),g2=d(),fl=n("p"),v2=p("\u201CReturns a new object replacing the specified fields with new values."),Wc=d(),Xt=n("h2"),gn=n("a"),ml=n("span"),h(Xa.$$.fragment),y2=d(),_l=n("span"),T2=p("FlaxQuestionAnsweringModelOutput"),jc=d(),R=n("div"),h(Ua.$$.fragment),b2=d(),gl=n("p"),w2=p("Base class for outputs of question answering models."),x2=d(),vn=n("div"),h(Ya.$$.fragment),$2=d(),vl=n("p"),O2=p("\u201CReturns a new object replacing the specified fields with new values."),Dc=d(),Ut=n("h2"),yn=n("a"),yl=n("span"),h(Ga.$$.fragment),q2=d(),Tl=n("span"),F2=p("FlaxSeq2SeqQuestionAnsweringModelOutput"),Hc=d(),X=n("div"),h(Ja.$$.fragment),S2=d(),bl=n("p"),M2=p("Base class for outputs of sequence-to-sequence question answering models."),k2=d(),Tn=n("div"),h(Ka.$$.fragment),A2=d(),wl=n("p"),C2=p("\u201CReturns a new object replacing the specified fields with new values."),this.h()},l(e){const u=J$('[data-svelte="svelte-1phssyn"]',document.head);x=s(u,"META",{name:!0,content:!0}),u.forEach(t),Yt=i(e),$=s(e,"H1",{class:!0});var Za=a($);A=s(Za,"A",{id:!0,class:!0,href:!0});var E2=a(A);J=s(E2,"SPAN",{});var N2=a(J);f(O.$$.fragment,N2),N2.forEach(t),E2.forEach(t),wn=i(Za),K=s(Za,"SPAN",{});var z2=a(K);Z=c(z2,"Model outputs"),z2.forEach(t),Za.forEach(t),M=i(e),C=s(e,"P",{});var Vc=a(C);er=c(Vc,"All models have outputs that are instances of subclasses of "),tr=s(Vc,"A",{href:!0});var P2=a(tr);am=c(P2,"ModelOutput"),P2.forEach(t),rm=c(Vc,`. 
Those are data structures containing all the information returned by the model, but that can also be used as tuples or dictionaries.`),Vc.forEach(t),$l=i(e),or=s(e,"P",{});var B2=a(or);dm=c(B2,"Let\u2019s see how this looks in an example:"),B2.forEach(t),Ol=i(e),f(xn.$$.fragment,e),ql=i(e),b=s(e,"P",{});var w=a(b);im=c(w,"The "),wr=s(w,"CODE",{});var L2=a(wr);um=c(L2,"outputs"),L2.forEach(t),lm=c(w," object is a "),nr=s(w,"A",{href:!0});var W2=a(nr);pm=c(W2,"SequenceClassifierOutput"),W2.forEach(t),cm=c(w,`, as we can see in the documentation of that class below, it means it has an optional `),xr=s(w,"CODE",{});var j2=a(xr);hm=c(j2,"loss"),j2.forEach(t),fm=c(w,", a "),$r=s(w,"CODE",{});var D2=a($r);mm=c(D2,"logits"),D2.forEach(t),_m=c(w," an optional "),Or=s(w,"CODE",{});var H2=a(Or);gm=c(H2,"hidden_states"),H2.forEach(t),vm=c(w,` and an optional `),qr=s(w,"CODE",{});var I2=a(qr);ym=c(I2,"attentions"),I2.forEach(t),Tm=c(w," attribute. Here we have the "),Fr=s(w,"CODE",{});var V2=a(Fr);bm=c(V2,"loss"),V2.forEach(t),wm=c(w," since we passed along "),Sr=s(w,"CODE",{});var Q2=a(Sr);xm=c(Q2,"labels"),Q2.forEach(t),$m=c(w,`, but we don\u2019t have `),Mr=s(w,"CODE",{});var R2=a(Mr);Om=c(R2,"hidden_states"),R2.forEach(t),qm=c(w," and "),kr=s(w,"CODE",{});var X2=a(kr);Fm=c(X2,"attentions"),X2.forEach(t),Sm=c(w," because we didn\u2019t pass "),Ar=s(w,"CODE",{});var U2=a(Ar);Mm=c(U2,"output_hidden_states=True"),U2.forEach(t),km=c(w,` or `),Cr=s(w,"CODE",{});var Y2=a(Cr);Am=c(Y2,"output_attentions=True"),Y2.forEach(t),Cm=c(w,"."),w.forEach(t),Fl=i(e),q=s(e,"P",{});var U=a(q);Em=c(U,`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),Er=s(U,"CODE",{});var G2=a(Er);Nm=c(G2,"None"),G2.forEach(t),zm=c(U,". Here for instance "),Nr=s(U,"CODE",{});var J2=a(Nr);Pm=c(J2,"outputs.loss"),J2.forEach(t),Bm=c(U," is the loss computed by the model, and "),zr=s(U,"CODE",{});var K2=a(zr);Lm=c(K2,"outputs.attentions"),K2.forEach(t),Wm=c(U,` is `),Pr=s(U,"CODE",{});var Z2=a(Pr);jm=c(Z2,"None"),Z2.forEach(t),Dm=c(U,"."),U.forEach(t),Sl=i(e),F=s(e,"P",{});var Y=a(F);Hm=c(Y,"When considering our "),Br=s(Y,"CODE",{});var eb=a(Br);Im=c(eb,"outputs"),eb.forEach(t),Vm=c(Y," object as tuple, it only considers the attributes that don\u2019t have "),Lr=s(Y,"CODE",{});var tb=a(Lr);Qm=c(tb,"None"),tb.forEach(t),Rm=c(Y,` values. Here for instance, it has two elements, `),Wr=s(Y,"CODE",{});var ob=a(Wr);Xm=c(ob,"loss"),ob.forEach(t),Um=c(Y," then "),jr=s(Y,"CODE",{});var nb=a(jr);Ym=c(nb,"logits"),nb.forEach(t),Gm=c(Y,", so"),Y.forEach(t),Ml=i(e),f($n.$$.fragment,e),kl=i(e),Gt=s(e,"P",{});var Qc=a(Gt);Jm=c(Qc,"will return the tuple "),Dr=s(Qc,"CODE",{});var sb=a(Dr);Km=c(sb,"(outputs.loss, outputs.logits)"),sb.forEach(t),Zm=c(Qc," for instance."),Qc.forEach(t),Al=i(e),S=s(e,"P",{});var G=a(S);e_=c(G,"When considering our "),Hr=s(G,"CODE",{});var ab=a(Hr);t_=c(ab,"outputs"),ab.forEach(t),o_=c(G," object as dictionary, it only considers the attributes that don\u2019t have "),Ir=s(G,"CODE",{});var rb=a(Ir);n_=c(rb,"None"),rb.forEach(t),s_=c(G,` values. Here for instance, it has two keys that are `),Vr=s(G,"CODE",{});var db=a(Vr);a_=c(db,"loss"),db.forEach(t),r_=c(G," and "),Qr=s(G,"CODE",{});var ib=a(Qr);d_=c(ib,"logits"),ib.forEach(t),i_=c(G,"."),G.forEach(t),Cl=i(e),sr=s(e,"P",{});var ub=a(sr);u_=c(ub,`We document here the generic model outputs that are used by more than one model type. 
Specific output types are documented on their corresponding model page.`),ub.forEach(t),El=i(e),ee=s(e,"H2",{class:!0});var Rc=a(ee);Jt=s(Rc,"A",{id:!0,class:!0,href:!0});var lb=a(Jt);Rr=s(lb,"SPAN",{});var pb=a(Rr);f(On.$$.fragment,pb),pb.forEach(t),lb.forEach(t),l_=i(Rc),Xr=s(Rc,"SPAN",{});var cb=a(Xr);p_=c(cb,"ModelOutput"),cb.forEach(t),Rc.forEach(t),Nl=i(e),k=s(e,"DIV",{class:!0});var bn=a(k);f(qn.$$.fragment,bn),c_=i(bn),te=s(bn,"P",{});var rr=a(te);h_=c(rr,"Base class for all model outputs as dataclass. Has a "),Ur=s(rr,"CODE",{});var hb=a(Ur);f_=c(hb,"__getitem__"),hb.forEach(t),m_=c(rr,` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `),Yr=s(rr,"CODE",{});var fb=a(Yr);__=c(fb,"None"),fb.forEach(t),g_=c(rr,` attributes. Otherwise behaves like a regular python dictionary.`),rr.forEach(t),v_=i(bn),f(Kt.$$.fragment,bn),y_=i(bn),Zt=s(bn,"DIV",{class:!0});var Xc=a(Zt);f(Fn.$$.fragment,Xc),T_=i(Xc),Sn=s(Xc,"P",{});var Uc=a(Sn);b_=c(Uc,"Convert self to a tuple containing all the attributes/keys that are not "),Gr=s(Uc,"CODE",{});var mb=a(Gr);w_=c(mb,"None"),mb.forEach(t),x_=c(Uc,"."),Uc.forEach(t),Xc.forEach(t),bn.forEach(t),zl=i(e),oe=s(e,"H2",{class:!0});var Yc=a(oe);eo=s(Yc,"A",{id:!0,class:!0,href:!0});var _b=a(eo);Jr=s(_b,"SPAN",{});var gb=a(Jr);f(Mn.$$.fragment,gb),gb.forEach(t),_b.forEach(t),$_=i(Yc),Kr=s(Yc,"SPAN",{});var vb=a(Kr);O_=c(vb,"BaseModelOutput"),vb.forEach(t),Yc.forEach(t),Pl=i(e),ne=s(e,"DIV",{class:!0});var Gc=a(ne);f(kn.$$.fragment,Gc),q_=i(Gc),Zr=s(Gc,"P",{});var yb=a(Zr);F_=c(yb,"Base class for model\u2019s outputs, with potential hidden states and attentions."),yb.forEach(t),Gc.forEach(t),Bl=i(e),se=s(e,"H2",{class:!0});var Jc=a(se);to=s(Jc,"A",{id:!0,class:!0,href:!0});var Tb=a(to);ed=s(Tb,"SPAN",{});var bb=a(ed);f(An.$$.fragment,bb),bb.forEach(t),Tb.forEach(t),S_=i(Jc),td=s(Jc,"SPAN",{});var wb=a(td);M_=c(wb,"BaseModelOutputWithPooling"),wb.forEach(t),Jc.forEach(t),Ll=i(e),ae=s(e,"DIV",{class:!0});var Kc=a(ae);f(Cn.$$.fragment,Kc),k_=i(Kc),od=s(Kc,"P",{});var xb=a(od);A_=c(xb,"Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),xb.forEach(t),Kc.forEach(t),Wl=i(e),re=s(e,"H2",{class:!0});var Zc=a(re);oo=s(Zc,"A",{id:!0,class:!0,href:!0});var $b=a(oo);nd=s($b,"SPAN",{});var Ob=a(nd);f(En.$$.fragment,Ob),Ob.forEach(t),$b.forEach(t),C_=i(Zc),sd=s(Zc,"SPAN",{});var qb=a(sd);E_=c(qb,"BaseModelOutputWithCrossAttentions"),qb.forEach(t),Zc.forEach(t),jl=i(e),de=s(e,"DIV",{class:!0});var eh=a(de);f(Nn.$$.fragment,eh),N_=i(eh),ad=s(eh,"P",{});var Fb=a(ad);z_=c(Fb,"Base class for model\u2019s outputs, with potential hidden states and attentions."),Fb.forEach(t),eh.forEach(t),Dl=i(e),ie=s(e,"H2",{class:!0});var th=a(ie);no=s(th,"A",{id:!0,class:!0,href:!0});var Sb=a(no);rd=s(Sb,"SPAN",{});var Mb=a(rd);f(zn.$$.fragment,Mb),Mb.forEach(t),Sb.forEach(t),P_=i(th),dd=s(th,"SPAN",{});var kb=a(dd);B_=c(kb,"BaseModelOutputWithPoolingAndCrossAttentions"),kb.forEach(t),th.forEach(t),Hl=i(e),ue=s(e,"DIV",{class:!0});var oh=a(ue);f(Pn.$$.fragment,oh),L_=i(oh),id=s(oh,"P",{});var Ab=a(id);W_=c(Ab,"Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),Ab.forEach(t),oh.forEach(t),Il=i(e),le=s(e,"H2",{class:!0});var nh=a(le);so=s(nh,"A",{id:!0,class:!0,href:!0});var Cb=a(so);ud=s(Cb,"SPAN",{});var Eb=a(ud);f(Bn.$$.fragment,Eb),Eb.forEach(t),Cb.forEach(t),j_=i(nh),ld=s(nh,"SPAN",{});var 
Nb=a(ld);D_=c(Nb,"BaseModelOutputWithPast"),Nb.forEach(t),nh.forEach(t),Vl=i(e),pe=s(e,"DIV",{class:!0});var sh=a(pe);f(Ln.$$.fragment,sh),H_=i(sh),pd=s(sh,"P",{});var zb=a(pd);I_=c(zb,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),zb.forEach(t),sh.forEach(t),Ql=i(e),ce=s(e,"H2",{class:!0});var ah=a(ce);ao=s(ah,"A",{id:!0,class:!0,href:!0});var Pb=a(ao);cd=s(Pb,"SPAN",{});var Bb=a(cd);f(Wn.$$.fragment,Bb),Bb.forEach(t),Pb.forEach(t),V_=i(ah),hd=s(ah,"SPAN",{});var Lb=a(hd);Q_=c(Lb,"BaseModelOutputWithPastAndCrossAttentions"),Lb.forEach(t),ah.forEach(t),Rl=i(e),he=s(e,"DIV",{class:!0});var rh=a(he);f(jn.$$.fragment,rh),R_=i(rh),fd=s(rh,"P",{});var Wb=a(fd);X_=c(Wb,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Wb.forEach(t),rh.forEach(t),Xl=i(e),fe=s(e,"H2",{class:!0});var dh=a(fe);ro=s(dh,"A",{id:!0,class:!0,href:!0});var jb=a(ro);md=s(jb,"SPAN",{});var Db=a(md);f(Dn.$$.fragment,Db),Db.forEach(t),jb.forEach(t),U_=i(dh),_d=s(dh,"SPAN",{});var Hb=a(_d);Y_=c(Hb,"Seq2SeqModelOutput"),Hb.forEach(t),dh.forEach(t),Ul=i(e),me=s(e,"DIV",{class:!0});var ih=a(me);f(Hn.$$.fragment,ih),G_=i(ih),gd=s(ih,"P",{});var Ib=a(gd);J_=c(Ib,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Ib.forEach(t),ih.forEach(t),Yl=i(e),_e=s(e,"H2",{class:!0});var uh=a(_e);io=s(uh,"A",{id:!0,class:!0,href:!0});var Vb=a(io);vd=s(Vb,"SPAN",{});var Qb=a(vd);f(In.$$.fragment,Qb),Qb.forEach(t),Vb.forEach(t),K_=i(uh),yd=s(uh,"SPAN",{});var Rb=a(yd);Z_=c(Rb,"CausalLMOutput"),Rb.forEach(t),uh.forEach(t),Gl=i(e),ge=s(e,"DIV",{class:!0});var lh=a(ge);f(Vn.$$.fragment,lh),eg=i(lh),Td=s(lh,"P",{});var Xb=a(Td);tg=c(Xb,"Base class for causal language model (or autoregressive) outputs."),Xb.forEach(t),lh.forEach(t),Jl=i(e),ve=s(e,"H2",{class:!0});var ph=a(ve);uo=s(ph,"A",{id:!0,class:!0,href:!0});var Ub=a(uo);bd=s(Ub,"SPAN",{});var Yb=a(bd);f(Qn.$$.fragment,Yb),Yb.forEach(t),Ub.forEach(t),og=i(ph),wd=s(ph,"SPAN",{});var Gb=a(wd);ng=c(Gb,"CausalLMOutputWithCrossAttentions"),Gb.forEach(t),ph.forEach(t),Kl=i(e),ye=s(e,"DIV",{class:!0});var ch=a(ye);f(Rn.$$.fragment,ch),sg=i(ch),xd=s(ch,"P",{});var Jb=a(xd);ag=c(Jb,"Base class for causal language model (or autoregressive) outputs."),Jb.forEach(t),ch.forEach(t),Zl=i(e),Te=s(e,"H2",{class:!0});var hh=a(Te);lo=s(hh,"A",{id:!0,class:!0,href:!0});var Kb=a(lo);$d=s(Kb,"SPAN",{});var Zb=a($d);f(Xn.$$.fragment,Zb),Zb.forEach(t),Kb.forEach(t),rg=i(hh),Od=s(hh,"SPAN",{});var ew=a(Od);dg=c(ew,"CausalLMOutputWithPast"),ew.forEach(t),hh.forEach(t),ep=i(e),be=s(e,"DIV",{class:!0});var fh=a(be);f(Un.$$.fragment,fh),ig=i(fh),qd=s(fh,"P",{});var tw=a(qd);ug=c(tw,"Base class for causal language model (or autoregressive) outputs."),tw.forEach(t),fh.forEach(t),tp=i(e),we=s(e,"H2",{class:!0});var mh=a(we);po=s(mh,"A",{id:!0,class:!0,href:!0});var ow=a(po);Fd=s(ow,"SPAN",{});var nw=a(Fd);f(Yn.$$.fragment,nw),nw.forEach(t),ow.forEach(t),lg=i(mh),Sd=s(mh,"SPAN",{});var sw=a(Sd);pg=c(sw,"MaskedLMOutput"),sw.forEach(t),mh.forEach(t),op=i(e),xe=s(e,"DIV",{class:!0});var _h=a(xe);f(Gn.$$.fragment,_h),cg=i(_h),Md=s(_h,"P",{});var aw=a(Md);hg=c(aw,"Base class for masked language models outputs."),aw.forEach(t),_h.forEach(t),np=i(e),$e=s(e,"H2",{class:!0});var gh=a($e);co=s(gh,"A",{id:!0,class:!0,href:!0});var rw=a(co);kd=s(rw,"SPAN",{});var 
dw=a(kd);f(Jn.$$.fragment,dw),dw.forEach(t),rw.forEach(t),fg=i(gh),Ad=s(gh,"SPAN",{});var iw=a(Ad);mg=c(iw,"Seq2SeqLMOutput"),iw.forEach(t),gh.forEach(t),sp=i(e),Oe=s(e,"DIV",{class:!0});var vh=a(Oe);f(Kn.$$.fragment,vh),_g=i(vh),Cd=s(vh,"P",{});var uw=a(Cd);gg=c(uw,"Base class for sequence-to-sequence language models outputs."),uw.forEach(t),vh.forEach(t),ap=i(e),qe=s(e,"H2",{class:!0});var yh=a(qe);ho=s(yh,"A",{id:!0,class:!0,href:!0});var lw=a(ho);Ed=s(lw,"SPAN",{});var pw=a(Ed);f(Zn.$$.fragment,pw),pw.forEach(t),lw.forEach(t),vg=i(yh),Nd=s(yh,"SPAN",{});var cw=a(Nd);yg=c(cw,"NextSentencePredictorOutput"),cw.forEach(t),yh.forEach(t),rp=i(e),Fe=s(e,"DIV",{class:!0});var Th=a(Fe);f(es.$$.fragment,Th),Tg=i(Th),zd=s(Th,"P",{});var hw=a(zd);bg=c(hw,"Base class for outputs of models predicting if two sentences are consecutive or not."),hw.forEach(t),Th.forEach(t),dp=i(e),Se=s(e,"H2",{class:!0});var bh=a(Se);fo=s(bh,"A",{id:!0,class:!0,href:!0});var fw=a(fo);Pd=s(fw,"SPAN",{});var mw=a(Pd);f(ts.$$.fragment,mw),mw.forEach(t),fw.forEach(t),wg=i(bh),Bd=s(bh,"SPAN",{});var _w=a(Bd);xg=c(_w,"SequenceClassifierOutput"),_w.forEach(t),bh.forEach(t),ip=i(e),Me=s(e,"DIV",{class:!0});var wh=a(Me);f(os.$$.fragment,wh),$g=i(wh),Ld=s(wh,"P",{});var gw=a(Ld);Og=c(gw,"Base class for outputs of sentence classification models."),gw.forEach(t),wh.forEach(t),up=i(e),ke=s(e,"H2",{class:!0});var xh=a(ke);mo=s(xh,"A",{id:!0,class:!0,href:!0});var vw=a(mo);Wd=s(vw,"SPAN",{});var yw=a(Wd);f(ns.$$.fragment,yw),yw.forEach(t),vw.forEach(t),qg=i(xh),jd=s(xh,"SPAN",{});var Tw=a(jd);Fg=c(Tw,"Seq2SeqSequenceClassifierOutput"),Tw.forEach(t),xh.forEach(t),lp=i(e),Ae=s(e,"DIV",{class:!0});var $h=a(Ae);f(ss.$$.fragment,$h),Sg=i($h),Dd=s($h,"P",{});var bw=a(Dd);Mg=c(bw,"Base class for outputs of sequence-to-sequence sentence classification models."),bw.forEach(t),$h.forEach(t),pp=i(e),Ce=s(e,"H2",{class:!0});var Oh=a(Ce);_o=s(Oh,"A",{id:!0,class:!0,href:!0});var ww=a(_o);Hd=s(ww,"SPAN",{});var xw=a(Hd);f(as.$$.fragment,xw),xw.forEach(t),ww.forEach(t),kg=i(Oh),Id=s(Oh,"SPAN",{});var $w=a(Id);Ag=c($w,"MultipleChoiceModelOutput"),$w.forEach(t),Oh.forEach(t),cp=i(e),Ee=s(e,"DIV",{class:!0});var qh=a(Ee);f(rs.$$.fragment,qh),Cg=i(qh),Vd=s(qh,"P",{});var Ow=a(Vd);Eg=c(Ow,"Base class for outputs of multiple choice models."),Ow.forEach(t),qh.forEach(t),hp=i(e),Ne=s(e,"H2",{class:!0});var Fh=a(Ne);go=s(Fh,"A",{id:!0,class:!0,href:!0});var qw=a(go);Qd=s(qw,"SPAN",{});var Fw=a(Qd);f(ds.$$.fragment,Fw),Fw.forEach(t),qw.forEach(t),Ng=i(Fh),Rd=s(Fh,"SPAN",{});var Sw=a(Rd);zg=c(Sw,"TokenClassifierOutput"),Sw.forEach(t),Fh.forEach(t),fp=i(e),ze=s(e,"DIV",{class:!0});var Sh=a(ze);f(is.$$.fragment,Sh),Pg=i(Sh),Xd=s(Sh,"P",{});var Mw=a(Xd);Bg=c(Mw,"Base class for outputs of token classification models."),Mw.forEach(t),Sh.forEach(t),mp=i(e),Pe=s(e,"H2",{class:!0});var Mh=a(Pe);vo=s(Mh,"A",{id:!0,class:!0,href:!0});var kw=a(vo);Ud=s(kw,"SPAN",{});var Aw=a(Ud);f(us.$$.fragment,Aw),Aw.forEach(t),kw.forEach(t),Lg=i(Mh),Yd=s(Mh,"SPAN",{});var Cw=a(Yd);Wg=c(Cw,"QuestionAnsweringModelOutput"),Cw.forEach(t),Mh.forEach(t),_p=i(e),Be=s(e,"DIV",{class:!0});var kh=a(Be);f(ls.$$.fragment,kh),jg=i(kh),Gd=s(kh,"P",{});var Ew=a(Gd);Dg=c(Ew,"Base class for outputs of question answering models."),Ew.forEach(t),kh.forEach(t),gp=i(e),Le=s(e,"H2",{class:!0});var Ah=a(Le);yo=s(Ah,"A",{id:!0,class:!0,href:!0});var Nw=a(yo);Jd=s(Nw,"SPAN",{});var zw=a(Jd);f(ps.$$.fragment,zw),zw.forEach(t),Nw.forEach(t),Hg=i(Ah),Kd=s(Ah,"SPAN",{});var 
Pw=a(Kd);Ig=c(Pw,"Seq2SeqQuestionAnsweringModelOutput"),Pw.forEach(t),Ah.forEach(t),vp=i(e),We=s(e,"DIV",{class:!0});var Ch=a(We);f(cs.$$.fragment,Ch),Vg=i(Ch),Zd=s(Ch,"P",{});var Bw=a(Zd);Qg=c(Bw,"Base class for outputs of sequence-to-sequence question answering models."),Bw.forEach(t),Ch.forEach(t),yp=i(e),je=s(e,"H2",{class:!0});var Eh=a(je);To=s(Eh,"A",{id:!0,class:!0,href:!0});var Lw=a(To);ei=s(Lw,"SPAN",{});var Ww=a(ei);f(hs.$$.fragment,Ww),Ww.forEach(t),Lw.forEach(t),Rg=i(Eh),ti=s(Eh,"SPAN",{});var jw=a(ti);Xg=c(jw,"SemanticSegmenterOutput"),jw.forEach(t),Eh.forEach(t),Tp=i(e),De=s(e,"DIV",{class:!0});var Nh=a(De);f(fs.$$.fragment,Nh),Ug=i(Nh),oi=s(Nh,"P",{});var Dw=a(oi);Yg=c(Dw,"Base class for outputs of semantic segmentation models."),Dw.forEach(t),Nh.forEach(t),bp=i(e),He=s(e,"H2",{class:!0});var zh=a(He);bo=s(zh,"A",{id:!0,class:!0,href:!0});var Hw=a(bo);ni=s(Hw,"SPAN",{});var Iw=a(ni);f(ms.$$.fragment,Iw),Iw.forEach(t),Hw.forEach(t),Gg=i(zh),si=s(zh,"SPAN",{});var Vw=a(si);Jg=c(Vw,"ImageClassifierOutput"),Vw.forEach(t),zh.forEach(t),wp=i(e),Ie=s(e,"DIV",{class:!0});var Ph=a(Ie);f(_s.$$.fragment,Ph),Kg=i(Ph),ai=s(Ph,"P",{});var Qw=a(ai);Zg=c(Qw,"Base class for outputs of image classification models."),Qw.forEach(t),Ph.forEach(t),xp=i(e),Ve=s(e,"H2",{class:!0});var Bh=a(Ve);wo=s(Bh,"A",{id:!0,class:!0,href:!0});var Rw=a(wo);ri=s(Rw,"SPAN",{});var Xw=a(ri);f(gs.$$.fragment,Xw),Xw.forEach(t),Rw.forEach(t),ev=i(Bh),di=s(Bh,"SPAN",{});var Uw=a(di);tv=c(Uw,"ImageClassifierOutputWithNoAttention"),Uw.forEach(t),Bh.forEach(t),$p=i(e),Qe=s(e,"DIV",{class:!0});var Lh=a(Qe);f(vs.$$.fragment,Lh),ov=i(Lh),ii=s(Lh,"P",{});var Yw=a(ii);nv=c(Yw,"Base class for outputs of image classification models."),Yw.forEach(t),Lh.forEach(t),Op=i(e),Re=s(e,"H2",{class:!0});var Wh=a(Re);xo=s(Wh,"A",{id:!0,class:!0,href:!0});var Gw=a(xo);ui=s(Gw,"SPAN",{});var Jw=a(ui);f(ys.$$.fragment,Jw),Jw.forEach(t),Gw.forEach(t),sv=i(Wh),li=s(Wh,"SPAN",{});var Kw=a(li);av=c(Kw,"DepthEstimatorOutput"),Kw.forEach(t),Wh.forEach(t),qp=i(e),Xe=s(e,"DIV",{class:!0});var jh=a(Xe);f(Ts.$$.fragment,jh),rv=i(jh),pi=s(jh,"P",{});var Zw=a(pi);dv=c(Zw,"Base class for outputs of depth estimation models."),Zw.forEach(t),jh.forEach(t),Fp=i(e),Ue=s(e,"H2",{class:!0});var Dh=a(Ue);$o=s(Dh,"A",{id:!0,class:!0,href:!0});var e1=a($o);ci=s(e1,"SPAN",{});var t1=a(ci);f(bs.$$.fragment,t1),t1.forEach(t),e1.forEach(t),iv=i(Dh),hi=s(Dh,"SPAN",{});var o1=a(hi);uv=c(o1,"Wav2Vec2BaseModelOutput"),o1.forEach(t),Dh.forEach(t),Sp=i(e),Ye=s(e,"DIV",{class:!0});var Hh=a(Ye);f(ws.$$.fragment,Hh),lv=i(Hh),fi=s(Hh,"P",{});var n1=a(fi);pv=c(n1,"Base class for models that have been trained with the Wav2Vec2 loss objective."),n1.forEach(t),Hh.forEach(t),Mp=i(e),Ge=s(e,"H2",{class:!0});var Ih=a(Ge);Oo=s(Ih,"A",{id:!0,class:!0,href:!0});var s1=a(Oo);mi=s(s1,"SPAN",{});var a1=a(mi);f(xs.$$.fragment,a1),a1.forEach(t),s1.forEach(t),cv=i(Ih),_i=s(Ih,"SPAN",{});var r1=a(_i);hv=c(r1,"XVectorOutput"),r1.forEach(t),Ih.forEach(t),kp=i(e),Je=s(e,"DIV",{class:!0});var Vh=a(Je);f($s.$$.fragment,Vh),fv=i(Vh),Os=s(Vh,"P",{});var Qh=a(Os);mv=c(Qh,"Output type of "),ar=s(Qh,"A",{href:!0});var d1=a(ar);_v=c(d1,"Wav2Vec2ForXVector"),d1.forEach(t),gv=c(Qh,"."),Qh.forEach(t),Vh.forEach(t),Ap=i(e),Ke=s(e,"H2",{class:!0});var Rh=a(Ke);qo=s(Rh,"A",{id:!0,class:!0,href:!0});var i1=a(qo);gi=s(i1,"SPAN",{});var u1=a(gi);f(qs.$$.fragment,u1),u1.forEach(t),i1.forEach(t),vv=i(Rh),vi=s(Rh,"SPAN",{});var 
l1=a(vi);yv=c(l1,"TFBaseModelOutput"),l1.forEach(t),Rh.forEach(t),Cp=i(e),Ze=s(e,"DIV",{class:!0});var Xh=a(Ze);f(Fs.$$.fragment,Xh),Tv=i(Xh),yi=s(Xh,"P",{});var p1=a(yi);bv=c(p1,"Base class for model\u2019s outputs, with potential hidden states and attentions."),p1.forEach(t),Xh.forEach(t),Ep=i(e),et=s(e,"H2",{class:!0});var Uh=a(et);Fo=s(Uh,"A",{id:!0,class:!0,href:!0});var c1=a(Fo);Ti=s(c1,"SPAN",{});var h1=a(Ti);f(Ss.$$.fragment,h1),h1.forEach(t),c1.forEach(t),wv=i(Uh),bi=s(Uh,"SPAN",{});var f1=a(bi);xv=c(f1,"TFBaseModelOutputWithPooling"),f1.forEach(t),Uh.forEach(t),Np=i(e),tt=s(e,"DIV",{class:!0});var Yh=a(tt);f(Ms.$$.fragment,Yh),$v=i(Yh),wi=s(Yh,"P",{});var m1=a(wi);Ov=c(m1,"Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),m1.forEach(t),Yh.forEach(t),zp=i(e),ot=s(e,"H2",{class:!0});var Gh=a(ot);So=s(Gh,"A",{id:!0,class:!0,href:!0});var _1=a(So);xi=s(_1,"SPAN",{});var g1=a(xi);f(ks.$$.fragment,g1),g1.forEach(t),_1.forEach(t),qv=i(Gh),$i=s(Gh,"SPAN",{});var v1=a($i);Fv=c(v1,"TFBaseModelOutputWithPoolingAndCrossAttentions"),v1.forEach(t),Gh.forEach(t),Pp=i(e),nt=s(e,"DIV",{class:!0});var Jh=a(nt);f(As.$$.fragment,Jh),Sv=i(Jh),Oi=s(Jh,"P",{});var y1=a(Oi);Mv=c(y1,"Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),y1.forEach(t),Jh.forEach(t),Bp=i(e),st=s(e,"H2",{class:!0});var Kh=a(st);Mo=s(Kh,"A",{id:!0,class:!0,href:!0});var T1=a(Mo);qi=s(T1,"SPAN",{});var b1=a(qi);f(Cs.$$.fragment,b1),b1.forEach(t),T1.forEach(t),kv=i(Kh),Fi=s(Kh,"SPAN",{});var w1=a(Fi);Av=c(w1,"TFBaseModelOutputWithPast"),w1.forEach(t),Kh.forEach(t),Lp=i(e),at=s(e,"DIV",{class:!0});var Zh=a(at);f(Es.$$.fragment,Zh),Cv=i(Zh),Si=s(Zh,"P",{});var x1=a(Si);Ev=c(x1,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),x1.forEach(t),Zh.forEach(t),Wp=i(e),rt=s(e,"H2",{class:!0});var ef=a(rt);ko=s(ef,"A",{id:!0,class:!0,href:!0});var $1=a(ko);Mi=s($1,"SPAN",{});var O1=a(Mi);f(Ns.$$.fragment,O1),O1.forEach(t),$1.forEach(t),Nv=i(ef),ki=s(ef,"SPAN",{});var q1=a(ki);zv=c(q1,"TFBaseModelOutputWithPastAndCrossAttentions"),q1.forEach(t),ef.forEach(t),jp=i(e),dt=s(e,"DIV",{class:!0});var tf=a(dt);f(zs.$$.fragment,tf),Pv=i(tf),Ai=s(tf,"P",{});var F1=a(Ai);Bv=c(F1,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),F1.forEach(t),tf.forEach(t),Dp=i(e),it=s(e,"H2",{class:!0});var of=a(it);Ao=s(of,"A",{id:!0,class:!0,href:!0});var S1=a(Ao);Ci=s(S1,"SPAN",{});var M1=a(Ci);f(Ps.$$.fragment,M1),M1.forEach(t),S1.forEach(t),Lv=i(of),Ei=s(of,"SPAN",{});var k1=a(Ei);Wv=c(k1,"TFSeq2SeqModelOutput"),k1.forEach(t),of.forEach(t),Hp=i(e),ut=s(e,"DIV",{class:!0});var nf=a(ut);f(Bs.$$.fragment,nf),jv=i(nf),Ni=s(nf,"P",{});var A1=a(Ni);Dv=c(A1,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),A1.forEach(t),nf.forEach(t),Ip=i(e),lt=s(e,"H2",{class:!0});var sf=a(lt);Co=s(sf,"A",{id:!0,class:!0,href:!0});var C1=a(Co);zi=s(C1,"SPAN",{});var E1=a(zi);f(Ls.$$.fragment,E1),E1.forEach(t),C1.forEach(t),Hv=i(sf),Pi=s(sf,"SPAN",{});var N1=a(Pi);Iv=c(N1,"TFCausalLMOutput"),N1.forEach(t),sf.forEach(t),Vp=i(e),pt=s(e,"DIV",{class:!0});var af=a(pt);f(Ws.$$.fragment,af),Vv=i(af),Bi=s(af,"P",{});var z1=a(Bi);Qv=c(z1,"Base class for causal language model (or autoregressive) outputs."),z1.forEach(t),af.forEach(t),Qp=i(e),ct=s(e,"H2",{class:!0});var 
rf=a(ct);Eo=s(rf,"A",{id:!0,class:!0,href:!0});var P1=a(Eo);Li=s(P1,"SPAN",{});var B1=a(Li);f(js.$$.fragment,B1),B1.forEach(t),P1.forEach(t),Rv=i(rf),Wi=s(rf,"SPAN",{});var L1=a(Wi);Xv=c(L1,"TFCausalLMOutputWithCrossAttentions"),L1.forEach(t),rf.forEach(t),Rp=i(e),ht=s(e,"DIV",{class:!0});var df=a(ht);f(Ds.$$.fragment,df),Uv=i(df),ji=s(df,"P",{});var W1=a(ji);Yv=c(W1,"Base class for causal language model (or autoregressive) outputs."),W1.forEach(t),df.forEach(t),Xp=i(e),ft=s(e,"H2",{class:!0});var uf=a(ft);No=s(uf,"A",{id:!0,class:!0,href:!0});var j1=a(No);Di=s(j1,"SPAN",{});var D1=a(Di);f(Hs.$$.fragment,D1),D1.forEach(t),j1.forEach(t),Gv=i(uf),Hi=s(uf,"SPAN",{});var H1=a(Hi);Jv=c(H1,"TFCausalLMOutputWithPast"),H1.forEach(t),uf.forEach(t),Up=i(e),mt=s(e,"DIV",{class:!0});var lf=a(mt);f(Is.$$.fragment,lf),Kv=i(lf),Ii=s(lf,"P",{});var I1=a(Ii);Zv=c(I1,"Base class for causal language model (or autoregressive) outputs."),I1.forEach(t),lf.forEach(t),Yp=i(e),_t=s(e,"H2",{class:!0});var pf=a(_t);zo=s(pf,"A",{id:!0,class:!0,href:!0});var V1=a(zo);Vi=s(V1,"SPAN",{});var Q1=a(Vi);f(Vs.$$.fragment,Q1),Q1.forEach(t),V1.forEach(t),ey=i(pf),Qi=s(pf,"SPAN",{});var R1=a(Qi);ty=c(R1,"TFMaskedLMOutput"),R1.forEach(t),pf.forEach(t),Gp=i(e),gt=s(e,"DIV",{class:!0});var cf=a(gt);f(Qs.$$.fragment,cf),oy=i(cf),Ri=s(cf,"P",{});var X1=a(Ri);ny=c(X1,"Base class for masked language models outputs."),X1.forEach(t),cf.forEach(t),Jp=i(e),vt=s(e,"H2",{class:!0});var hf=a(vt);Po=s(hf,"A",{id:!0,class:!0,href:!0});var U1=a(Po);Xi=s(U1,"SPAN",{});var Y1=a(Xi);f(Rs.$$.fragment,Y1),Y1.forEach(t),U1.forEach(t),sy=i(hf),Ui=s(hf,"SPAN",{});var G1=a(Ui);ay=c(G1,"TFSeq2SeqLMOutput"),G1.forEach(t),hf.forEach(t),Kp=i(e),yt=s(e,"DIV",{class:!0});var ff=a(yt);f(Xs.$$.fragment,ff),ry=i(ff),Yi=s(ff,"P",{});var J1=a(Yi);dy=c(J1,"Base class for sequence-to-sequence language models outputs."),J1.forEach(t),ff.forEach(t),Zp=i(e),Tt=s(e,"H2",{class:!0});var mf=a(Tt);Bo=s(mf,"A",{id:!0,class:!0,href:!0});var K1=a(Bo);Gi=s(K1,"SPAN",{});var Z1=a(Gi);f(Us.$$.fragment,Z1),Z1.forEach(t),K1.forEach(t),iy=i(mf),Ji=s(mf,"SPAN",{});var ex=a(Ji);uy=c(ex,"TFNextSentencePredictorOutput"),ex.forEach(t),mf.forEach(t),ec=i(e),bt=s(e,"DIV",{class:!0});var _f=a(bt);f(Ys.$$.fragment,_f),ly=i(_f),Ki=s(_f,"P",{});var tx=a(Ki);py=c(tx,"Base class for outputs of models predicting if two sentences are consecutive or not."),tx.forEach(t),_f.forEach(t),tc=i(e),wt=s(e,"H2",{class:!0});var gf=a(wt);Lo=s(gf,"A",{id:!0,class:!0,href:!0});var ox=a(Lo);Zi=s(ox,"SPAN",{});var nx=a(Zi);f(Gs.$$.fragment,nx),nx.forEach(t),ox.forEach(t),cy=i(gf),eu=s(gf,"SPAN",{});var sx=a(eu);hy=c(sx,"TFSequenceClassifierOutput"),sx.forEach(t),gf.forEach(t),oc=i(e),xt=s(e,"DIV",{class:!0});var vf=a(xt);f(Js.$$.fragment,vf),fy=i(vf),tu=s(vf,"P",{});var ax=a(tu);my=c(ax,"Base class for outputs of sentence classification models."),ax.forEach(t),vf.forEach(t),nc=i(e),$t=s(e,"H2",{class:!0});var yf=a($t);Wo=s(yf,"A",{id:!0,class:!0,href:!0});var rx=a(Wo);ou=s(rx,"SPAN",{});var dx=a(ou);f(Ks.$$.fragment,dx),dx.forEach(t),rx.forEach(t),_y=i(yf),nu=s(yf,"SPAN",{});var ix=a(nu);gy=c(ix,"TFSeq2SeqSequenceClassifierOutput"),ix.forEach(t),yf.forEach(t),sc=i(e),Ot=s(e,"DIV",{class:!0});var Tf=a(Ot);f(Zs.$$.fragment,Tf),vy=i(Tf),su=s(Tf,"P",{});var ux=a(su);yy=c(ux,"Base class for outputs of sequence-to-sequence sentence classification models."),ux.forEach(t),Tf.forEach(t),ac=i(e),qt=s(e,"H2",{class:!0});var bf=a(qt);jo=s(bf,"A",{id:!0,class:!0,href:!0});var lx=a(jo);au=s(lx,"SPAN",{});var 
px=a(au);f(ea.$$.fragment,px),px.forEach(t),lx.forEach(t),Ty=i(bf),ru=s(bf,"SPAN",{});var cx=a(ru);by=c(cx,"TFMultipleChoiceModelOutput"),cx.forEach(t),bf.forEach(t),rc=i(e),Ft=s(e,"DIV",{class:!0});var wf=a(Ft);f(ta.$$.fragment,wf),wy=i(wf),du=s(wf,"P",{});var hx=a(du);xy=c(hx,"Base class for outputs of multiple choice models."),hx.forEach(t),wf.forEach(t),dc=i(e),St=s(e,"H2",{class:!0});var xf=a(St);Do=s(xf,"A",{id:!0,class:!0,href:!0});var fx=a(Do);iu=s(fx,"SPAN",{});var mx=a(iu);f(oa.$$.fragment,mx),mx.forEach(t),fx.forEach(t),$y=i(xf),uu=s(xf,"SPAN",{});var _x=a(uu);Oy=c(_x,"TFTokenClassifierOutput"),_x.forEach(t),xf.forEach(t),ic=i(e),Mt=s(e,"DIV",{class:!0});var $f=a(Mt);f(na.$$.fragment,$f),qy=i($f),lu=s($f,"P",{});var gx=a(lu);Fy=c(gx,"Base class for outputs of token classification models."),gx.forEach(t),$f.forEach(t),uc=i(e),kt=s(e,"H2",{class:!0});var Of=a(kt);Ho=s(Of,"A",{id:!0,class:!0,href:!0});var vx=a(Ho);pu=s(vx,"SPAN",{});var yx=a(pu);f(sa.$$.fragment,yx),yx.forEach(t),vx.forEach(t),Sy=i(Of),cu=s(Of,"SPAN",{});var Tx=a(cu);My=c(Tx,"TFQuestionAnsweringModelOutput"),Tx.forEach(t),Of.forEach(t),lc=i(e),At=s(e,"DIV",{class:!0});var qf=a(At);f(aa.$$.fragment,qf),ky=i(qf),hu=s(qf,"P",{});var bx=a(hu);Ay=c(bx,"Base class for outputs of question answering models."),bx.forEach(t),qf.forEach(t),pc=i(e),Ct=s(e,"H2",{class:!0});var Ff=a(Ct);Io=s(Ff,"A",{id:!0,class:!0,href:!0});var wx=a(Io);fu=s(wx,"SPAN",{});var xx=a(fu);f(ra.$$.fragment,xx),xx.forEach(t),wx.forEach(t),Cy=i(Ff),mu=s(Ff,"SPAN",{});var $x=a(mu);Ey=c($x,"TFSeq2SeqQuestionAnsweringModelOutput"),$x.forEach(t),Ff.forEach(t),cc=i(e),Et=s(e,"DIV",{class:!0});var Sf=a(Et);f(da.$$.fragment,Sf),Ny=i(Sf),_u=s(Sf,"P",{});var Ox=a(_u);zy=c(Ox,"Base class for outputs of sequence-to-sequence question answering models."),Ox.forEach(t),Sf.forEach(t),hc=i(e),Nt=s(e,"H2",{class:!0});var Mf=a(Nt);Vo=s(Mf,"A",{id:!0,class:!0,href:!0});var qx=a(Vo);gu=s(qx,"SPAN",{});var Fx=a(gu);f(ia.$$.fragment,Fx),Fx.forEach(t),qx.forEach(t),Py=i(Mf),vu=s(Mf,"SPAN",{});var Sx=a(vu);By=c(Sx,"FlaxBaseModelOutput"),Sx.forEach(t),Mf.forEach(t),fc=i(e),E=s(e,"DIV",{class:!0});var dr=a(E);f(ua.$$.fragment,dr),Ly=i(dr),yu=s(dr,"P",{});var Mx=a(yu);Wy=c(Mx,"Base class for model\u2019s outputs, with potential hidden states and attentions."),Mx.forEach(t),jy=i(dr),Qo=s(dr,"DIV",{class:!0});var kf=a(Qo);f(la.$$.fragment,kf),Dy=i(kf),Tu=s(kf,"P",{});var kx=a(Tu);Hy=c(kx,"\u201CReturns a new object replacing the specified fields with new values."),kx.forEach(t),kf.forEach(t),dr.forEach(t),mc=i(e),zt=s(e,"H2",{class:!0});var Af=a(zt);Ro=s(Af,"A",{id:!0,class:!0,href:!0});var Ax=a(Ro);bu=s(Ax,"SPAN",{});var Cx=a(bu);f(pa.$$.fragment,Cx),Cx.forEach(t),Ax.forEach(t),Iy=i(Af),wu=s(Af,"SPAN",{});var Ex=a(wu);Vy=c(Ex,"FlaxBaseModelOutputWithPast"),Ex.forEach(t),Af.forEach(t),_c=i(e),N=s(e,"DIV",{class:!0});var ir=a(N);f(ca.$$.fragment,ir),Qy=i(ir),xu=s(ir,"P",{});var Nx=a(xu);Ry=c(Nx,"Base class for model\u2019s outputs, with potential hidden states and attentions."),Nx.forEach(t),Xy=i(ir),Xo=s(ir,"DIV",{class:!0});var Cf=a(Xo);f(ha.$$.fragment,Cf),Uy=i(Cf),$u=s(Cf,"P",{});var zx=a($u);Yy=c(zx,"\u201CReturns a new object replacing the specified fields with new values."),zx.forEach(t),Cf.forEach(t),ir.forEach(t),gc=i(e),Pt=s(e,"H2",{class:!0});var Ef=a(Pt);Uo=s(Ef,"A",{id:!0,class:!0,href:!0});var Px=a(Uo);Ou=s(Px,"SPAN",{});var Bx=a(Ou);f(fa.$$.fragment,Bx),Bx.forEach(t),Px.forEach(t),Gy=i(Ef),qu=s(Ef,"SPAN",{});var 
Lx=a(qu);Jy=c(Lx,"FlaxBaseModelOutputWithPooling"),Lx.forEach(t),Ef.forEach(t),vc=i(e),z=s(e,"DIV",{class:!0});var ur=a(z);f(ma.$$.fragment,ur),Ky=i(ur),Fu=s(ur,"P",{});var Wx=a(Fu);Zy=c(Wx,"Base class for model\u2019s outputs that also contains a pooling of the last hidden states."),Wx.forEach(t),eT=i(ur),Yo=s(ur,"DIV",{class:!0});var Nf=a(Yo);f(_a.$$.fragment,Nf),tT=i(Nf),Su=s(Nf,"P",{});var jx=a(Su);oT=c(jx,"\u201CReturns a new object replacing the specified fields with new values."),jx.forEach(t),Nf.forEach(t),ur.forEach(t),yc=i(e),Bt=s(e,"H2",{class:!0});var zf=a(Bt);Go=s(zf,"A",{id:!0,class:!0,href:!0});var Dx=a(Go);Mu=s(Dx,"SPAN",{});var Hx=a(Mu);f(ga.$$.fragment,Hx),Hx.forEach(t),Dx.forEach(t),nT=i(zf),ku=s(zf,"SPAN",{});var Ix=a(ku);sT=c(Ix,"FlaxBaseModelOutputWithPastAndCrossAttentions"),Ix.forEach(t),zf.forEach(t),Tc=i(e),P=s(e,"DIV",{class:!0});var lr=a(P);f(va.$$.fragment,lr),aT=i(lr),Au=s(lr,"P",{});var Vx=a(Au);rT=c(Vx,"Base class for model\u2019s outputs that may also contain a past key/values (to speed up sequential decoding)."),Vx.forEach(t),dT=i(lr),Jo=s(lr,"DIV",{class:!0});var Pf=a(Jo);f(ya.$$.fragment,Pf),iT=i(Pf),Cu=s(Pf,"P",{});var Qx=a(Cu);uT=c(Qx,"\u201CReturns a new object replacing the specified fields with new values."),Qx.forEach(t),Pf.forEach(t),lr.forEach(t),bc=i(e),Lt=s(e,"H2",{class:!0});var Bf=a(Lt);Ko=s(Bf,"A",{id:!0,class:!0,href:!0});var Rx=a(Ko);Eu=s(Rx,"SPAN",{});var Xx=a(Eu);f(Ta.$$.fragment,Xx),Xx.forEach(t),Rx.forEach(t),lT=i(Bf),Nu=s(Bf,"SPAN",{});var Ux=a(Nu);pT=c(Ux,"FlaxSeq2SeqModelOutput"),Ux.forEach(t),Bf.forEach(t),wc=i(e),B=s(e,"DIV",{class:!0});var pr=a(B);f(ba.$$.fragment,pr),cT=i(pr),zu=s(pr,"P",{});var Yx=a(zu);hT=c(Yx,`Base class for model encoder\u2019s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.`),Yx.forEach(t),fT=i(pr),Zo=s(pr,"DIV",{class:!0});var Lf=a(Zo);f(wa.$$.fragment,Lf),mT=i(Lf),Pu=s(Lf,"P",{});var Gx=a(Pu);_T=c(Gx,"\u201CReturns a new object replacing the specified fields with new values."),Gx.forEach(t),Lf.forEach(t),pr.forEach(t),xc=i(e),Wt=s(e,"H2",{class:!0});var Wf=a(Wt);en=s(Wf,"A",{id:!0,class:!0,href:!0});var Jx=a(en);Bu=s(Jx,"SPAN",{});var Kx=a(Bu);f(xa.$$.fragment,Kx),Kx.forEach(t),Jx.forEach(t),gT=i(Wf),Lu=s(Wf,"SPAN",{});var Zx=a(Lu);vT=c(Zx,"FlaxCausalLMOutputWithCrossAttentions"),Zx.forEach(t),Wf.forEach(t),$c=i(e),L=s(e,"DIV",{class:!0});var cr=a(L);f($a.$$.fragment,cr),yT=i(cr),Wu=s(cr,"P",{});var e$=a(Wu);TT=c(e$,"Base class for causal language model (or autoregressive) outputs."),e$.forEach(t),bT=i(cr),tn=s(cr,"DIV",{class:!0});var jf=a(tn);f(Oa.$$.fragment,jf),wT=i(jf),ju=s(jf,"P",{});var t$=a(ju);xT=c(t$,"\u201CReturns a new object replacing the specified fields with new values."),t$.forEach(t),jf.forEach(t),cr.forEach(t),Oc=i(e),jt=s(e,"H2",{class:!0});var Df=a(jt);on=s(Df,"A",{id:!0,class:!0,href:!0});var o$=a(on);Du=s(o$,"SPAN",{});var n$=a(Du);f(qa.$$.fragment,n$),n$.forEach(t),o$.forEach(t),$T=i(Df),Hu=s(Df,"SPAN",{});var s$=a(Hu);OT=c(s$,"FlaxMaskedLMOutput"),s$.forEach(t),Df.forEach(t),qc=i(e),W=s(e,"DIV",{class:!0});var hr=a(W);f(Fa.$$.fragment,hr),qT=i(hr),Iu=s(hr,"P",{});var a$=a(Iu);FT=c(a$,"Base class for masked language models outputs."),a$.forEach(t),ST=i(hr),nn=s(hr,"DIV",{class:!0});var Hf=a(nn);f(Sa.$$.fragment,Hf),MT=i(Hf),Vu=s(Hf,"P",{});var r$=a(Vu);kT=c(r$,"\u201CReturns a new object replacing the specified fields with new values."),r$.forEach(t),Hf.forEach(t),hr.forEach(t),Fc=i(e),Dt=s(e,"H2",{class:!0});var 
If=a(Dt);sn=s(If,"A",{id:!0,class:!0,href:!0});var d$=a(sn);Qu=s(d$,"SPAN",{});var i$=a(Qu);f(Ma.$$.fragment,i$),i$.forEach(t),d$.forEach(t),AT=i(If),Ru=s(If,"SPAN",{});var u$=a(Ru);CT=c(u$,"FlaxSeq2SeqLMOutput"),u$.forEach(t),If.forEach(t),Sc=i(e),j=s(e,"DIV",{class:!0});var fr=a(j);f(ka.$$.fragment,fr),ET=i(fr),Xu=s(fr,"P",{});var l$=a(Xu);NT=c(l$,"Base class for sequence-to-sequence language models outputs."),l$.forEach(t),zT=i(fr),an=s(fr,"DIV",{class:!0});var Vf=a(an);f(Aa.$$.fragment,Vf),PT=i(Vf),Uu=s(Vf,"P",{});var p$=a(Uu);BT=c(p$,"\u201CReturns a new object replacing the specified fields with new values."),p$.forEach(t),Vf.forEach(t),fr.forEach(t),Mc=i(e),Ht=s(e,"H2",{class:!0});var Qf=a(Ht);rn=s(Qf,"A",{id:!0,class:!0,href:!0});var c$=a(rn);Yu=s(c$,"SPAN",{});var h$=a(Yu);f(Ca.$$.fragment,h$),h$.forEach(t),c$.forEach(t),LT=i(Qf),Gu=s(Qf,"SPAN",{});var f$=a(Gu);WT=c(f$,"FlaxNextSentencePredictorOutput"),f$.forEach(t),Qf.forEach(t),kc=i(e),D=s(e,"DIV",{class:!0});var mr=a(D);f(Ea.$$.fragment,mr),jT=i(mr),Ju=s(mr,"P",{});var m$=a(Ju);DT=c(m$,"Base class for outputs of models predicting if two sentences are consecutive or not."),m$.forEach(t),HT=i(mr),dn=s(mr,"DIV",{class:!0});var Rf=a(dn);f(Na.$$.fragment,Rf),IT=i(Rf),Ku=s(Rf,"P",{});var _$=a(Ku);VT=c(_$,"\u201CReturns a new object replacing the specified fields with new values."),_$.forEach(t),Rf.forEach(t),mr.forEach(t),Ac=i(e),It=s(e,"H2",{class:!0});var Xf=a(It);un=s(Xf,"A",{id:!0,class:!0,href:!0});var g$=a(un);Zu=s(g$,"SPAN",{});var v$=a(Zu);f(za.$$.fragment,v$),v$.forEach(t),g$.forEach(t),QT=i(Xf),el=s(Xf,"SPAN",{});var y$=a(el);RT=c(y$,"FlaxSequenceClassifierOutput"),y$.forEach(t),Xf.forEach(t),Cc=i(e),H=s(e,"DIV",{class:!0});var _r=a(H);f(Pa.$$.fragment,_r),XT=i(_r),tl=s(_r,"P",{});var T$=a(tl);UT=c(T$,"Base class for outputs of sentence classification models."),T$.forEach(t),YT=i(_r),ln=s(_r,"DIV",{class:!0});var Uf=a(ln);f(Ba.$$.fragment,Uf),GT=i(Uf),ol=s(Uf,"P",{});var b$=a(ol);JT=c(b$,"\u201CReturns a new object replacing the specified fields with new values."),b$.forEach(t),Uf.forEach(t),_r.forEach(t),Ec=i(e),Vt=s(e,"H2",{class:!0});var Yf=a(Vt);pn=s(Yf,"A",{id:!0,class:!0,href:!0});var w$=a(pn);nl=s(w$,"SPAN",{});var x$=a(nl);f(La.$$.fragment,x$),x$.forEach(t),w$.forEach(t),KT=i(Yf),sl=s(Yf,"SPAN",{});var $$=a(sl);ZT=c($$,"FlaxSeq2SeqSequenceClassifierOutput"),$$.forEach(t),Yf.forEach(t),Nc=i(e),I=s(e,"DIV",{class:!0});var gr=a(I);f(Wa.$$.fragment,gr),e2=i(gr),al=s(gr,"P",{});var O$=a(al);t2=c(O$,"Base class for outputs of sequence-to-sequence sentence classification models."),O$.forEach(t),o2=i(gr),cn=s(gr,"DIV",{class:!0});var Gf=a(cn);f(ja.$$.fragment,Gf),n2=i(Gf),rl=s(Gf,"P",{});var q$=a(rl);s2=c(q$,"\u201CReturns a new object replacing the specified fields with new values."),q$.forEach(t),Gf.forEach(t),gr.forEach(t),zc=i(e),Qt=s(e,"H2",{class:!0});var Jf=a(Qt);hn=s(Jf,"A",{id:!0,class:!0,href:!0});var F$=a(hn);dl=s(F$,"SPAN",{});var S$=a(dl);f(Da.$$.fragment,S$),S$.forEach(t),F$.forEach(t),a2=i(Jf),il=s(Jf,"SPAN",{});var M$=a(il);r2=c(M$,"FlaxMultipleChoiceModelOutput"),M$.forEach(t),Jf.forEach(t),Pc=i(e),V=s(e,"DIV",{class:!0});var vr=a(V);f(Ha.$$.fragment,vr),d2=i(vr),ul=s(vr,"P",{});var k$=a(ul);i2=c(k$,"Base class for outputs of multiple choice models."),k$.forEach(t),u2=i(vr),fn=s(vr,"DIV",{class:!0});var Kf=a(fn);f(Ia.$$.fragment,Kf),l2=i(Kf),ll=s(Kf,"P",{});var A$=a(ll);p2=c(A$,"\u201CReturns a new object replacing the specified fields with new 
values."),A$.forEach(t),Kf.forEach(t),vr.forEach(t),Bc=i(e),Rt=s(e,"H2",{class:!0});var Zf=a(Rt);mn=s(Zf,"A",{id:!0,class:!0,href:!0});var C$=a(mn);pl=s(C$,"SPAN",{});var E$=a(pl);f(Va.$$.fragment,E$),E$.forEach(t),C$.forEach(t),c2=i(Zf),cl=s(Zf,"SPAN",{});var N$=a(cl);h2=c(N$,"FlaxTokenClassifierOutput"),N$.forEach(t),Zf.forEach(t),Lc=i(e),Q=s(e,"DIV",{class:!0});var yr=a(Q);f(Qa.$$.fragment,yr),f2=i(yr),hl=s(yr,"P",{});var z$=a(hl);m2=c(z$,"Base class for outputs of token classification models."),z$.forEach(t),_2=i(yr),_n=s(yr,"DIV",{class:!0});var em=a(_n);f(Ra.$$.fragment,em),g2=i(em),fl=s(em,"P",{});var P$=a(fl);v2=c(P$,"\u201CReturns a new object replacing the specified fields with new values."),P$.forEach(t),em.forEach(t),yr.forEach(t),Wc=i(e),Xt=s(e,"H2",{class:!0});var tm=a(Xt);gn=s(tm,"A",{id:!0,class:!0,href:!0});var B$=a(gn);ml=s(B$,"SPAN",{});var L$=a(ml);f(Xa.$$.fragment,L$),L$.forEach(t),B$.forEach(t),y2=i(tm),_l=s(tm,"SPAN",{});var W$=a(_l);T2=c(W$,"FlaxQuestionAnsweringModelOutput"),W$.forEach(t),tm.forEach(t),jc=i(e),R=s(e,"DIV",{class:!0});var Tr=a(R);f(Ua.$$.fragment,Tr),b2=i(Tr),gl=s(Tr,"P",{});var j$=a(gl);w2=c(j$,"Base class for outputs of question answering models."),j$.forEach(t),x2=i(Tr),vn=s(Tr,"DIV",{class:!0});var om=a(vn);f(Ya.$$.fragment,om),$2=i(om),vl=s(om,"P",{});var D$=a(vl);O2=c(D$,"\u201CReturns a new object replacing the specified fields with new values."),D$.forEach(t),om.forEach(t),Tr.forEach(t),Dc=i(e),Ut=s(e,"H2",{class:!0});var nm=a(Ut);yn=s(nm,"A",{id:!0,class:!0,href:!0});var H$=a(yn);yl=s(H$,"SPAN",{});var I$=a(yl);f(Ga.$$.fragment,I$),I$.forEach(t),H$.forEach(t),q2=i(nm),Tl=s(nm,"SPAN",{});var V$=a(Tl);F2=c(V$,"FlaxSeq2SeqQuestionAnsweringModelOutput"),V$.forEach(t),nm.forEach(t),Hc=i(e),X=s(e,"DIV",{class:!0});var br=a(X);f(Ja.$$.fragment,br),S2=i(br),bl=s(br,"P",{});var Q$=a(bl);M2=c(Q$,"Base class for outputs of sequence-to-sequence question answering models."),Q$.forEach(t),k2=i(br),Tn=s(br,"DIV",{class:!0});var sm=a(Tn);f(Ka.$$.fragment,sm),A2=i(sm),wl=s(sm,"P",{});var R$=a(wl);C2=c(R$,"\u201CReturns a new object replacing the specified fields with new values."),R$.forEach(t),sm.forEach(t),br.forEach(t),this.h()},h(){r(x,"name","hf:doc:metadata"),r(x,"content",JSON.stringify(oO)),r(A,"id","model-outputs"),r(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(A,"href","#model-outputs"),r($,"class","relative group"),r(tr,"href","/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput"),r(nr,"href","/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput"),r(Jt,"id","transformers.utils.ModelOutput"),r(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Jt,"href","#transformers.utils.ModelOutput"),r(ee,"class","relative group"),r(Zt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(eo,"id","transformers.modeling_outputs.BaseModelOutput"),r(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),r(eo,"href","#transformers.modeling_outputs.BaseModelOutput"),r(oe,"class","relative group"),r(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(to,"id","transformers.modeling_outputs.BaseModelOutputWithPooling"),r(to,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(to,"href","#transformers.modeling_outputs.BaseModelOutputWithPooling"),r(se,"class","relative group"),r(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(oo,"id","transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"),r(oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(oo,"href","#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"),r(re,"class","relative group"),r(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(no,"id","transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"),r(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(no,"href","#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"),r(ie,"class","relative group"),r(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(so,"id","transformers.modeling_outputs.BaseModelOutputWithPast"),r(so,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(so,"href","#transformers.modeling_outputs.BaseModelOutputWithPast"),r(le,"class","relative group"),r(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(ao,"id","transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"),r(ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ao,"href","#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"),r(ce,"class","relative group"),r(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(ro,"id","transformers.modeling_outputs.Seq2SeqModelOutput"),r(ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ro,"href","#transformers.modeling_outputs.Seq2SeqModelOutput"),r(fe,"class","relative group"),r(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(io,"id","transformers.modeling_outputs.CausalLMOutput"),r(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(io,"href","#transformers.modeling_outputs.CausalLMOutput"),r(_e,"class","relative group"),r(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(uo,"id","transformers.modeling_outputs.CausalLMOutputWithCrossAttentions"),r(uo,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(uo,"href","#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions"),r(ve,"class","relative group"),r(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(lo,"id","transformers.modeling_outputs.CausalLMOutputWithPast"),r(lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(lo,"href","#transformers.modeling_outputs.CausalLMOutputWithPast"),r(Te,"class","relative group"),r(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(po,"id","transformers.modeling_outputs.MaskedLMOutput"),r(po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(po,"href","#transformers.modeling_outputs.MaskedLMOutput"),r(we,"class","relative group"),r(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(co,"id","transformers.modeling_outputs.Seq2SeqLMOutput"),r(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(co,"href","#transformers.modeling_outputs.Seq2SeqLMOutput"),r($e,"class","relative group"),r(Oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(ho,"id","transformers.modeling_outputs.NextSentencePredictorOutput"),r(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ho,"href","#transformers.modeling_outputs.NextSentencePredictorOutput"),r(qe,"class","relative group"),r(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(fo,"id","transformers.modeling_outputs.SequenceClassifierOutput"),r(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(fo,"href","#transformers.modeling_outputs.SequenceClassifierOutput"),r(Se,"class","relative group"),r(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(mo,"id","transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"),r(mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(mo,"href","#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"),r(ke,"class","relative group"),r(Ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(_o,"id","transformers.modeling_outputs.MultipleChoiceModelOutput"),r(_o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(_o,"href","#transformers.modeling_outputs.MultipleChoiceModelOutput"),r(Ce,"class","relative group"),r(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),r(go,"id","transformers.modeling_outputs.TokenClassifierOutput"),r(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(go,"href","#transformers.modeling_outputs.TokenClassifierOutput"),r(Ne,"class","relative group"),r(ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(vo,"id","transformers.modeling_outputs.QuestionAnsweringModelOutput"),r(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(vo,"href","#transformers.modeling_outputs.QuestionAnsweringModelOutput"),r(Pe,"class","relative group"),r(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(yo,"id","transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"),r(yo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(yo,"href","#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"),r(Le,"class","relative group"),r(We,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(To,"id","transformers.modeling_outputs.SemanticSegmenterOutput"),r(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(To,"href","#transformers.modeling_outputs.SemanticSegmenterOutput"),r(je,"class","relative group"),r(De,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(bo,"id","transformers.modeling_outputs.ImageClassifierOutput"),r(bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(bo,"href","#transformers.modeling_outputs.ImageClassifierOutput"),r(He,"class","relative group"),r(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(wo,"id","transformers.modeling_outputs.ImageClassifierOutputWithNoAttention"),r(wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(wo,"href","#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention"),r(Ve,"class","relative group"),r(Qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(xo,"id","transformers.modeling_outputs.DepthEstimatorOutput"),r(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(xo,"href","#transformers.modeling_outputs.DepthEstimatorOutput"),r(Re,"class","relative group"),r(Xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r($o,"id","transformers.modeling_outputs.Wav2Vec2BaseModelOutput"),r($o,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r($o,"href","#transformers.modeling_outputs.Wav2Vec2BaseModelOutput"),r(Ue,"class","relative 
group"),r(Ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Oo,"id","transformers.modeling_outputs.XVectorOutput"),r(Oo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Oo,"href","#transformers.modeling_outputs.XVectorOutput"),r(Ge,"class","relative group"),r(ar,"href","/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector"),r(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(qo,"id","transformers.modeling_tf_outputs.TFBaseModelOutput"),r(qo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(qo,"href","#transformers.modeling_tf_outputs.TFBaseModelOutput"),r(Ke,"class","relative group"),r(Ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Fo,"id","transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"),r(Fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Fo,"href","#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"),r(et,"class","relative group"),r(tt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(So,"id","transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"),r(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(So,"href","#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"),r(ot,"class","relative group"),r(nt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Mo,"id","transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"),r(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Mo,"href","#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"),r(st,"class","relative group"),r(at,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(ko,"id","transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"),r(ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(ko,"href","#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"),r(rt,"class","relative group"),r(dt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Ao,"id","transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"),r(Ao,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ao,"href","#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"),r(it,"class","relative group"),r(ut,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Co,"id","transformers.modeling_tf_outputs.TFCausalLMOutput"),r(Co,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Co,"href","#transformers.modeling_tf_outputs.TFCausalLMOutput"),r(lt,"class","relative group"),r(pt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Eo,"id","transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions"),r(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Eo,"href","#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions"),r(ct,"class","relative group"),r(ht,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(No,"id","transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"),r(No,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(No,"href","#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"),r(ft,"class","relative group"),r(mt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(zo,"id","transformers.modeling_tf_outputs.TFMaskedLMOutput"),r(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(zo,"href","#transformers.modeling_tf_outputs.TFMaskedLMOutput"),r(_t,"class","relative group"),r(gt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Po,"id","transformers.modeling_tf_outputs.TFSeq2SeqLMOutput"),r(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Po,"href","#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput"),r(vt,"class","relative group"),r(yt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Bo,"id","transformers.modeling_tf_outputs.TFNextSentencePredictorOutput"),r(Bo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Bo,"href","#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput"),r(Tt,"class","relative group"),r(bt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Lo,"id","transformers.modeling_tf_outputs.TFSequenceClassifierOutput"),r(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Lo,"href","#transformers.modeling_tf_outputs.TFSequenceClassifierOutput"),r(wt,"class","relative group"),r(xt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Wo,"id","transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"),r(Wo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Wo,"href","#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"),r($t,"class","relative group"),r(Ot,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),r(jo,"id","transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"),r(jo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(jo,"href","#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"),r(qt,"class","relative group"),r(Ft,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Do,"id","transformers.modeling_tf_outputs.TFTokenClassifierOutput"),r(Do,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Do,"href","#transformers.modeling_tf_outputs.TFTokenClassifierOutput"),r(St,"class","relative group"),r(Mt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Ho,"id","transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"),r(Ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ho,"href","#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"),r(kt,"class","relative group"),r(At,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Io,"id","transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"),r(Io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Io,"href","#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"),r(Ct,"class","relative group"),r(Et,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Vo,"id","transformers.modeling_flax_outputs.FlaxBaseModelOutput"),r(Vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Vo,"href","#transformers.modeling_flax_outputs.FlaxBaseModelOutput"),r(Nt,"class","relative group"),r(Qo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Ro,"id","transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"),r(Ro,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ro,"href","#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"),r(zt,"class","relative group"),r(Xo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Uo,"id","transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"),r(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Uo,"href","#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"),r(Pt,"class","relative group"),r(Yo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),r(Go,"id","transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"),r(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Go,"href","#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"),r(Bt,"class","relative group"),r(Jo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Ko,"id","transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"),r(Ko,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(Ko,"href","#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"),r(Lt,"class","relative group"),r(Zo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(en,"id","transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"),r(en,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(en,"href","#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"),r(Wt,"class","relative group"),r(tn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(on,"id","transformers.modeling_flax_outputs.FlaxMaskedLMOutput"),r(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(on,"href","#transformers.modeling_flax_outputs.FlaxMaskedLMOutput"),r(jt,"class","relative group"),r(nn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(sn,"id","transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"),r(sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(sn,"href","#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"),r(Dt,"class","relative group"),r(an,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(rn,"id","transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"),r(rn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(rn,"href","#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"),r(Ht,"class","relative group"),r(dn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(un,"id","transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"),r(un,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(un,"href","#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"),r(It,"class","relative group"),r(ln,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(pn,"id","transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"),r(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(pn,"href","#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"),r(Vt,"class","relative group"),r(cn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(hn,"id","transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput"),r(hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(hn,"href","#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput"),r(Qt,"class","relative group"),r(fn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(mn,"id","transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"),r(mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(mn,"href","#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"),r(Rt,"class","relative group"),r(_n,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(gn,"id","transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"),r(gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(gn,"href","#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"),r(Xt,"class","relative group"),r(vn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(yn,"id","transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput"),r(yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),r(yn,"href","#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput"),r(Ut,"class","relative group"),r(Tn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),r(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,u){o(document.head,x),l(e,Yt,u),l(e,$,u),o($,A),o(A,J),m(O,J,null),o($,wn),o($,K),o(K,Z),l(e,M,u),l(e,C,u),o(C,er),o(C,tr),o(tr,am),o(C,rm),l(e,$l,u),l(e,or,u),o(or,dm),l(e,Ol,u),m(xn,e,u),l(e,ql,u),l(e,b,u),o(b,im),o(b,wr),o(wr,um),o(b,lm),o(b,nr),o(nr,pm),o(b,cm),o(b,xr),o(xr,hm),o(b,fm),o(b,$r),o($r,mm),o(b,_m),o(b,Or),o(Or,gm),o(b,vm),o(b,qr),o(qr,ym),o(b,Tm),o(b,Fr),o(Fr,bm),o(b,wm),o(b,Sr),o(Sr,xm),o(b,$m),o(b,Mr),o(Mr,Om),o(b,qm),o(b,kr),o(kr,Fm),o(b,Sm),o(b,Ar),o(Ar,Mm),o(b,km),o(b,Cr),o(Cr,Am),o(b,Cm),l(e,Fl,u),l(e,q,u),o(q,Em),o(q,Er),o(Er,Nm),o(q,zm),o(q,Nr),o(Nr,Pm),o(q,Bm),o(q,zr),o(zr,Lm),o(q,Wm),o(q,Pr),o(Pr,jm),o(q,Dm),l(e,Sl,u),l(e,F,u),o(F,Hm),o(F,Br),o(Br,Im),o(F,Vm),o(F,Lr),o(Lr,Qm),o(F,Rm),o(F,Wr),o(Wr,Xm),o(F,Um),o(F,jr),o(jr,Ym),o(F,Gm),l(e,Ml,u),m($n,e,u),l(e,kl,u),l(e,Gt,u),o(Gt,Jm),o(Gt,Dr),o(Dr,Km),o(Gt,Zm),l(e,Al,u),l(e,S,u),o(S,e_),o(S,Hr),o(Hr,t_),o(S,o_),o(S,Ir),o(Ir,n_),o(S,s_),o(S,Vr),o(Vr,a_),o(S,r_),o(S,Qr),o(Qr,d_),o(S,i_),l(e,Cl,u),l(e,sr,u),o(sr,u_),l(e,El,u),l(e,ee,u),o(ee,Jt),o(Jt,Rr),m(On,Rr,null),o(ee,l_),o(ee,Xr),o(Xr,p_),l(e,Nl,u),l(e,k,u),m(qn,k,null),o(k,c_),o(k,te),o(te,h_),o(te,Ur),o(Ur,f_),o(te,m_),o(te,Yr),o(Yr,__),o(te,g_),o(k,v_),m(Kt,k,null),o(k,y_),o(k,Zt),m(Fn,Zt,null),o(Zt,T_),o(Zt,Sn),o(Sn,b_),o(Sn,Gr),o(Gr,w_),o(Sn,x_),l(e,zl,u),l(e,oe,u),o(oe,eo),o(eo,Jr),m(Mn,Jr,null),o(oe,$_),o(oe,Kr),o(Kr,O_),l(e,Pl,u),l(e,ne,u),m(kn,ne,null),o(ne,q_),o(ne,Zr),o(Zr,F_),l(e,Bl,u),l(e,se,u),o(se,to),o(to,ed),m(An,ed,null),o(se,S_),o(se,td),o(td,M_),l(e,Ll,u),l(e,ae,u),m(Cn,ae,null),o(ae,k_),o(ae,od),o(od,A_),l(e,Wl,u),l(e,re,u),o(re,oo),o(oo,nd),m(En,nd,null),o(re,C_),o(re,sd),o(sd,E_),l(e,jl,u),l(e,de,u),m(Nn,de,null),o(de,N_),o(de,ad),o(ad,z_),l(e,Dl,u),l(e,ie,u),o(ie,no),o(no,rd),m(zn,rd,null),o(ie,P_),o(ie,dd),o(dd,B_),l(e,Hl,u),l(e,ue,u),m(Pn,ue,null),o(ue,L_),o(ue,id),o(id,W_),l(e,Il,u),l(e,le,u),o(le,so),o(so,ud),m(Bn,ud,null),o(le,j_),o(le,ld),o(ld,D_),l(e,Vl,u),l(e,pe,u),m(Ln,pe,null),o(pe,H_),o(pe,pd),o(pd,I_),l(e,Ql,u),l(e,ce,u),o(ce,ao),o(ao,cd),m(Wn,cd,null),o(ce,V_),o(ce,hd),o(hd,Q_),l(e,Rl,u),l(e,he,u),m(jn,he,null),o(he,R_),o(he,fd),o(fd,X_),l(e,Xl,u),l(e,fe,u),o(fe,ro),o(ro,md),m(Dn,md,null),o(fe,U_),o(fe,_d),o(_d,Y_),l(e,Ul,u),l(e,me,u),m(Hn,me,null),o(me,G_),o(me,gd),o(gd,J_),l(e,Yl,u),l(e,_e,u),o(_e,io),o(io,vd),m(In,vd,null),o(_e,K_),o(_e,yd),o(yd,Z_),l(e,Gl,u),l(e,ge,u),m(Vn,ge,null),o(ge,eg),o(ge,Td),o(Td,tg),l(e,Jl,u),l(e,ve,u),o(ve,uo),o(uo,bd),m(Qn,bd,null),o(ve,og),o(ve,wd),o(wd,ng),l(e,Kl,u),l(e,ye,u),m(Rn,ye,null),o(ye,sg),o(ye,xd),o(xd,ag),l(e,Zl,u),l(e,Te,u),o(Te,lo),o(lo,$d),m(Xn,$d,null),o(Te,rg),o(Te,Od),o(Od,dg),l(e,ep,u),l(e,be,u),m(Un,be,null),o(be,ig),o(be,qd),o(qd,ug),l(e,tp,u),l(e,we,u),o(we,po),o(po,Fd),m(Yn,Fd,null),o(we,lg),o(we,Sd),o(Sd,pg),l(e,op,u),l(e,xe,u),m(Gn,xe,null),o(xe,cg),o(xe,Md),o(Md,hg),l(e,np,u),l(e,$e,u),o($e,co),o(co,kd),m(Jn,kd,null),o($e,fg),o($e,Ad),o(Ad,mg),l(e,sp,u),l(e,Oe,u),m(Kn,Oe,null),o(Oe,_g),o(Oe,Cd),o(Cd,gg),l(e,ap,u),l(e,qe,u),o(qe,ho),o(ho,Ed),m(Zn,Ed,null),o(qe,vg),o(qe,Nd),o(Nd,yg),l(e,rp,u),l(e,Fe,u),m(es,Fe,null),o(Fe,Tg),o(Fe,zd),o(zd,bg),l(e,dp,u),l(e,Se,u),o(Se,fo),o(fo,Pd),m(ts,Pd,null),o(Se,wg),o(Se,Bd),o(Bd,xg),l(e,ip,u),l(e,Me,u),m(os,Me,null),o(Me,$g),o(Me,Ld),o(Ld,Og),l(e,up,u),l(e,ke,u),o(ke,mo),o(mo,Wd),m(ns,Wd,null),o(ke,qg),o(ke,jd),o(jd,Fg),l(e,lp,u),l(e,Ae,u),m(ss,Ae,null),o(Ae,Sg),o(Ae,Dd),o(Dd,Mg),l(e,pp,u),l(e,Ce,u),o(Ce,_o),o(_o,Hd),m(as,Hd,null),o(Ce,kg),o(Ce,Id),o(Id,Ag),l(e,cp,u),l(e,Ee,u),m(rs,Ee,null),o(Ee,Cg),o(Ee,Vd),o(Vd,Eg),l(e,hp
,u),l(e,Ne,u),o(Ne,go),o(go,Qd),m(ds,Qd,null),o(Ne,Ng),o(Ne,Rd),o(Rd,zg),l(e,fp,u),l(e,ze,u),m(is,ze,null),o(ze,Pg),o(ze,Xd),o(Xd,Bg),l(e,mp,u),l(e,Pe,u),o(Pe,vo),o(vo,Ud),m(us,Ud,null),o(Pe,Lg),o(Pe,Yd),o(Yd,Wg),l(e,_p,u),l(e,Be,u),m(ls,Be,null),o(Be,jg),o(Be,Gd),o(Gd,Dg),l(e,gp,u),l(e,Le,u),o(Le,yo),o(yo,Jd),m(ps,Jd,null),o(Le,Hg),o(Le,Kd),o(Kd,Ig),l(e,vp,u),l(e,We,u),m(cs,We,null),o(We,Vg),o(We,Zd),o(Zd,Qg),l(e,yp,u),l(e,je,u),o(je,To),o(To,ei),m(hs,ei,null),o(je,Rg),o(je,ti),o(ti,Xg),l(e,Tp,u),l(e,De,u),m(fs,De,null),o(De,Ug),o(De,oi),o(oi,Yg),l(e,bp,u),l(e,He,u),o(He,bo),o(bo,ni),m(ms,ni,null),o(He,Gg),o(He,si),o(si,Jg),l(e,wp,u),l(e,Ie,u),m(_s,Ie,null),o(Ie,Kg),o(Ie,ai),o(ai,Zg),l(e,xp,u),l(e,Ve,u),o(Ve,wo),o(wo,ri),m(gs,ri,null),o(Ve,ev),o(Ve,di),o(di,tv),l(e,$p,u),l(e,Qe,u),m(vs,Qe,null),o(Qe,ov),o(Qe,ii),o(ii,nv),l(e,Op,u),l(e,Re,u),o(Re,xo),o(xo,ui),m(ys,ui,null),o(Re,sv),o(Re,li),o(li,av),l(e,qp,u),l(e,Xe,u),m(Ts,Xe,null),o(Xe,rv),o(Xe,pi),o(pi,dv),l(e,Fp,u),l(e,Ue,u),o(Ue,$o),o($o,ci),m(bs,ci,null),o(Ue,iv),o(Ue,hi),o(hi,uv),l(e,Sp,u),l(e,Ye,u),m(ws,Ye,null),o(Ye,lv),o(Ye,fi),o(fi,pv),l(e,Mp,u),l(e,Ge,u),o(Ge,Oo),o(Oo,mi),m(xs,mi,null),o(Ge,cv),o(Ge,_i),o(_i,hv),l(e,kp,u),l(e,Je,u),m($s,Je,null),o(Je,fv),o(Je,Os),o(Os,mv),o(Os,ar),o(ar,_v),o(Os,gv),l(e,Ap,u),l(e,Ke,u),o(Ke,qo),o(qo,gi),m(qs,gi,null),o(Ke,vv),o(Ke,vi),o(vi,yv),l(e,Cp,u),l(e,Ze,u),m(Fs,Ze,null),o(Ze,Tv),o(Ze,yi),o(yi,bv),l(e,Ep,u),l(e,et,u),o(et,Fo),o(Fo,Ti),m(Ss,Ti,null),o(et,wv),o(et,bi),o(bi,xv),l(e,Np,u),l(e,tt,u),m(Ms,tt,null),o(tt,$v),o(tt,wi),o(wi,Ov),l(e,zp,u),l(e,ot,u),o(ot,So),o(So,xi),m(ks,xi,null),o(ot,qv),o(ot,$i),o($i,Fv),l(e,Pp,u),l(e,nt,u),m(As,nt,null),o(nt,Sv),o(nt,Oi),o(Oi,Mv),l(e,Bp,u),l(e,st,u),o(st,Mo),o(Mo,qi),m(Cs,qi,null),o(st,kv),o(st,Fi),o(Fi,Av),l(e,Lp,u),l(e,at,u),m(Es,at,null),o(at,Cv),o(at,Si),o(Si,Ev),l(e,Wp,u),l(e,rt,u),o(rt,ko),o(ko,Mi),m(Ns,Mi,null),o(rt,Nv),o(rt,ki),o(ki,zv),l(e,jp,u),l(e,dt,u),m(zs,dt,null),o(dt,Pv),o(dt,Ai),o(Ai,Bv),l(e,Dp,u),l(e,it,u),o(it,Ao),o(Ao,Ci),m(Ps,Ci,null),o(it,Lv),o(it,Ei),o(Ei,Wv),l(e,Hp,u),l(e,ut,u),m(Bs,ut,null),o(ut,jv),o(ut,Ni),o(Ni,Dv),l(e,Ip,u),l(e,lt,u),o(lt,Co),o(Co,zi),m(Ls,zi,null),o(lt,Hv),o(lt,Pi),o(Pi,Iv),l(e,Vp,u),l(e,pt,u),m(Ws,pt,null),o(pt,Vv),o(pt,Bi),o(Bi,Qv),l(e,Qp,u),l(e,ct,u),o(ct,Eo),o(Eo,Li),m(js,Li,null),o(ct,Rv),o(ct,Wi),o(Wi,Xv),l(e,Rp,u),l(e,ht,u),m(Ds,ht,null),o(ht,Uv),o(ht,ji),o(ji,Yv),l(e,Xp,u),l(e,ft,u),o(ft,No),o(No,Di),m(Hs,Di,null),o(ft,Gv),o(ft,Hi),o(Hi,Jv),l(e,Up,u),l(e,mt,u),m(Is,mt,null),o(mt,Kv),o(mt,Ii),o(Ii,Zv),l(e,Yp,u),l(e,_t,u),o(_t,zo),o(zo,Vi),m(Vs,Vi,null),o(_t,ey),o(_t,Qi),o(Qi,ty),l(e,Gp,u),l(e,gt,u),m(Qs,gt,null),o(gt,oy),o(gt,Ri),o(Ri,ny),l(e,Jp,u),l(e,vt,u),o(vt,Po),o(Po,Xi),m(Rs,Xi,null),o(vt,sy),o(vt,Ui),o(Ui,ay),l(e,Kp,u),l(e,yt,u),m(Xs,yt,null),o(yt,ry),o(yt,Yi),o(Yi,dy),l(e,Zp,u),l(e,Tt,u),o(Tt,Bo),o(Bo,Gi),m(Us,Gi,null),o(Tt,iy),o(Tt,Ji),o(Ji,uy),l(e,ec,u),l(e,bt,u),m(Ys,bt,null),o(bt,ly),o(bt,Ki),o(Ki,py),l(e,tc,u),l(e,wt,u),o(wt,Lo),o(Lo,Zi),m(Gs,Zi,null),o(wt,cy),o(wt,eu),o(eu,hy),l(e,oc,u),l(e,xt,u),m(Js,xt,null),o(xt,fy),o(xt,tu),o(tu,my),l(e,nc,u),l(e,$t,u),o($t,Wo),o(Wo,ou),m(Ks,ou,null),o($t,_y),o($t,nu),o(nu,gy),l(e,sc,u),l(e,Ot,u),m(Zs,Ot,null),o(Ot,vy),o(Ot,su),o(su,yy),l(e,ac,u),l(e,qt,u),o(qt,jo),o(jo,au),m(ea,au,null),o(qt,Ty),o(qt,ru),o(ru,by),l(e,rc,u),l(e,Ft,u),m(ta,Ft,null),o(Ft,wy),o(Ft,du),o(du,xy),l(e,dc,u),l(e,St,u),o(St,Do),o(Do,iu),m(oa,iu,null),o(St,$y),o(St,uu),o(uu,Oy),l(e,ic,u),l(e,Mt,u),m(na,Mt,null),o(Mt,qy),o(Mt,lu),o(lu,Fy),l(e,uc,u),l(e,kt,u),o(kt,Ho),o(Ho,
pu),m(sa,pu,null),o(kt,Sy),o(kt,cu),o(cu,My),l(e,lc,u),l(e,At,u),m(aa,At,null),o(At,ky),o(At,hu),o(hu,Ay),l(e,pc,u),l(e,Ct,u),o(Ct,Io),o(Io,fu),m(ra,fu,null),o(Ct,Cy),o(Ct,mu),o(mu,Ey),l(e,cc,u),l(e,Et,u),m(da,Et,null),o(Et,Ny),o(Et,_u),o(_u,zy),l(e,hc,u),l(e,Nt,u),o(Nt,Vo),o(Vo,gu),m(ia,gu,null),o(Nt,Py),o(Nt,vu),o(vu,By),l(e,fc,u),l(e,E,u),m(ua,E,null),o(E,Ly),o(E,yu),o(yu,Wy),o(E,jy),o(E,Qo),m(la,Qo,null),o(Qo,Dy),o(Qo,Tu),o(Tu,Hy),l(e,mc,u),l(e,zt,u),o(zt,Ro),o(Ro,bu),m(pa,bu,null),o(zt,Iy),o(zt,wu),o(wu,Vy),l(e,_c,u),l(e,N,u),m(ca,N,null),o(N,Qy),o(N,xu),o(xu,Ry),o(N,Xy),o(N,Xo),m(ha,Xo,null),o(Xo,Uy),o(Xo,$u),o($u,Yy),l(e,gc,u),l(e,Pt,u),o(Pt,Uo),o(Uo,Ou),m(fa,Ou,null),o(Pt,Gy),o(Pt,qu),o(qu,Jy),l(e,vc,u),l(e,z,u),m(ma,z,null),o(z,Ky),o(z,Fu),o(Fu,Zy),o(z,eT),o(z,Yo),m(_a,Yo,null),o(Yo,tT),o(Yo,Su),o(Su,oT),l(e,yc,u),l(e,Bt,u),o(Bt,Go),o(Go,Mu),m(ga,Mu,null),o(Bt,nT),o(Bt,ku),o(ku,sT),l(e,Tc,u),l(e,P,u),m(va,P,null),o(P,aT),o(P,Au),o(Au,rT),o(P,dT),o(P,Jo),m(ya,Jo,null),o(Jo,iT),o(Jo,Cu),o(Cu,uT),l(e,bc,u),l(e,Lt,u),o(Lt,Ko),o(Ko,Eu),m(Ta,Eu,null),o(Lt,lT),o(Lt,Nu),o(Nu,pT),l(e,wc,u),l(e,B,u),m(ba,B,null),o(B,cT),o(B,zu),o(zu,hT),o(B,fT),o(B,Zo),m(wa,Zo,null),o(Zo,mT),o(Zo,Pu),o(Pu,_T),l(e,xc,u),l(e,Wt,u),o(Wt,en),o(en,Bu),m(xa,Bu,null),o(Wt,gT),o(Wt,Lu),o(Lu,vT),l(e,$c,u),l(e,L,u),m($a,L,null),o(L,yT),o(L,Wu),o(Wu,TT),o(L,bT),o(L,tn),m(Oa,tn,null),o(tn,wT),o(tn,ju),o(ju,xT),l(e,Oc,u),l(e,jt,u),o(jt,on),o(on,Du),m(qa,Du,null),o(jt,$T),o(jt,Hu),o(Hu,OT),l(e,qc,u),l(e,W,u),m(Fa,W,null),o(W,qT),o(W,Iu),o(Iu,FT),o(W,ST),o(W,nn),m(Sa,nn,null),o(nn,MT),o(nn,Vu),o(Vu,kT),l(e,Fc,u),l(e,Dt,u),o(Dt,sn),o(sn,Qu),m(Ma,Qu,null),o(Dt,AT),o(Dt,Ru),o(Ru,CT),l(e,Sc,u),l(e,j,u),m(ka,j,null),o(j,ET),o(j,Xu),o(Xu,NT),o(j,zT),o(j,an),m(Aa,an,null),o(an,PT),o(an,Uu),o(Uu,BT),l(e,Mc,u),l(e,Ht,u),o(Ht,rn),o(rn,Yu),m(Ca,Yu,null),o(Ht,LT),o(Ht,Gu),o(Gu,WT),l(e,kc,u),l(e,D,u),m(Ea,D,null),o(D,jT),o(D,Ju),o(Ju,DT),o(D,HT),o(D,dn),m(Na,dn,null),o(dn,IT),o(dn,Ku),o(Ku,VT),l(e,Ac,u),l(e,It,u),o(It,un),o(un,Zu),m(za,Zu,null),o(It,QT),o(It,el),o(el,RT),l(e,Cc,u),l(e,H,u),m(Pa,H,null),o(H,XT),o(H,tl),o(tl,UT),o(H,YT),o(H,ln),m(Ba,ln,null),o(ln,GT),o(ln,ol),o(ol,JT),l(e,Ec,u),l(e,Vt,u),o(Vt,pn),o(pn,nl),m(La,nl,null),o(Vt,KT),o(Vt,sl),o(sl,ZT),l(e,Nc,u),l(e,I,u),m(Wa,I,null),o(I,e2),o(I,al),o(al,t2),o(I,o2),o(I,cn),m(ja,cn,null),o(cn,n2),o(cn,rl),o(rl,s2),l(e,zc,u),l(e,Qt,u),o(Qt,hn),o(hn,dl),m(Da,dl,null),o(Qt,a2),o(Qt,il),o(il,r2),l(e,Pc,u),l(e,V,u),m(Ha,V,null),o(V,d2),o(V,ul),o(ul,i2),o(V,u2),o(V,fn),m(Ia,fn,null),o(fn,l2),o(fn,ll),o(ll,p2),l(e,Bc,u),l(e,Rt,u),o(Rt,mn),o(mn,pl),m(Va,pl,null),o(Rt,c2),o(Rt,cl),o(cl,h2),l(e,Lc,u),l(e,Q,u),m(Qa,Q,null),o(Q,f2),o(Q,hl),o(hl,m2),o(Q,_2),o(Q,_n),m(Ra,_n,null),o(_n,g2),o(_n,fl),o(fl,v2),l(e,Wc,u),l(e,Xt,u),o(Xt,gn),o(gn,ml),m(Xa,ml,null),o(Xt,y2),o(Xt,_l),o(_l,T2),l(e,jc,u),l(e,R,u),m(Ua,R,null),o(R,b2),o(R,gl),o(gl,w2),o(R,x2),o(R,vn),m(Ya,vn,null),o(vn,$2),o(vn,vl),o(vl,O2),l(e,Dc,u),l(e,Ut,u),o(Ut,yn),o(yn,yl),m(Ga,yl,null),o(Ut,q2),o(Ut,Tl),o(Tl,F2),l(e,Hc,u),l(e,X,u),m(Ja,X,null),o(X,S2),o(X,bl),o(bl,M2),o(X,k2),o(X,Tn),m(Ka,Tn,null),o(Tn,A2),o(Tn,wl),o(wl,C2),Ic=!0},p(e,[u]){const 
Za={};u&2&&(Za.$$scope={dirty:u,ctx:e}),Kt.$set(Za)},i(e){Ic||(_(O.$$.fragment,e),_(xn.$$.fragment,e),_($n.$$.fragment,e),_(On.$$.fragment,e),_(qn.$$.fragment,e),_(Kt.$$.fragment,e),_(Fn.$$.fragment,e),_(Mn.$$.fragment,e),_(kn.$$.fragment,e),_(An.$$.fragment,e),_(Cn.$$.fragment,e),_(En.$$.fragment,e),_(Nn.$$.fragment,e),_(zn.$$.fragment,e),_(Pn.$$.fragment,e),_(Bn.$$.fragment,e),_(Ln.$$.fragment,e),_(Wn.$$.fragment,e),_(jn.$$.fragment,e),_(Dn.$$.fragment,e),_(Hn.$$.fragment,e),_(In.$$.fragment,e),_(Vn.$$.fragment,e),_(Qn.$$.fragment,e),_(Rn.$$.fragment,e),_(Xn.$$.fragment,e),_(Un.$$.fragment,e),_(Yn.$$.fragment,e),_(Gn.$$.fragment,e),_(Jn.$$.fragment,e),_(Kn.$$.fragment,e),_(Zn.$$.fragment,e),_(es.$$.fragment,e),_(ts.$$.fragment,e),_(os.$$.fragment,e),_(ns.$$.fragment,e),_(ss.$$.fragment,e),_(as.$$.fragment,e),_(rs.$$.fragment,e),_(ds.$$.fragment,e),_(is.$$.fragment,e),_(us.$$.fragment,e),_(ls.$$.fragment,e),_(ps.$$.fragment,e),_(cs.$$.fragment,e),_(hs.$$.fragment,e),_(fs.$$.fragment,e),_(ms.$$.fragment,e),_(_s.$$.fragment,e),_(gs.$$.fragment,e),_(vs.$$.fragment,e),_(ys.$$.fragment,e),_(Ts.$$.fragment,e),_(bs.$$.fragment,e),_(ws.$$.fragment,e),_(xs.$$.fragment,e),_($s.$$.fragment,e),_(qs.$$.fragment,e),_(Fs.$$.fragment,e),_(Ss.$$.fragment,e),_(Ms.$$.fragment,e),_(ks.$$.fragment,e),_(As.$$.fragment,e),_(Cs.$$.fragment,e),_(Es.$$.fragment,e),_(Ns.$$.fragment,e),_(zs.$$.fragment,e),_(Ps.$$.fragment,e),_(Bs.$$.fragment,e),_(Ls.$$.fragment,e),_(Ws.$$.fragment,e),_(js.$$.fragment,e),_(Ds.$$.fragment,e),_(Hs.$$.fragment,e),_(Is.$$.fragment,e),_(Vs.$$.fragment,e),_(Qs.$$.fragment,e),_(Rs.$$.fragment,e),_(Xs.$$.fragment,e),_(Us.$$.fragment,e),_(Ys.$$.fragment,e),_(Gs.$$.fragment,e),_(Js.$$.fragment,e),_(Ks.$$.fragment,e),_(Zs.$$.fragment,e),_(ea.$$.fragment,e),_(ta.$$.fragment,e),_(oa.$$.fragment,e),_(na.$$.fragment,e),_(sa.$$.fragment,e),_(aa.$$.fragment,e),_(ra.$$.fragment,e),_(da.$$.fragment,e),_(ia.$$.fragment,e),_(ua.$$.fragment,e),_(la.$$.fragment,e),_(pa.$$.fragment,e),_(ca.$$.fragment,e),_(ha.$$.fragment,e),_(fa.$$.fragment,e),_(ma.$$.fragment,e),_(_a.$$.fragment,e),_(ga.$$.fragment,e),_(va.$$.fragment,e),_(ya.$$.fragment,e),_(Ta.$$.fragment,e),_(ba.$$.fragment,e),_(wa.$$.fragment,e),_(xa.$$.fragment,e),_($a.$$.fragment,e),_(Oa.$$.fragment,e),_(qa.$$.fragment,e),_(Fa.$$.fragment,e),_(Sa.$$.fragment,e),_(Ma.$$.fragment,e),_(ka.$$.fragment,e),_(Aa.$$.fragment,e),_(Ca.$$.fragment,e),_(Ea.$$.fragment,e),_(Na.$$.fragment,e),_(za.$$.fragment,e),_(Pa.$$.fragment,e),_(Ba.$$.fragment,e),_(La.$$.fragment,e),_(Wa.$$.fragment,e),_(ja.$$.fragment,e),_(Da.$$.fragment,e),_(Ha.$$.fragment,e),_(Ia.$$.fragment,e),_(Va.$$.fragment,e),_(Qa.$$.fragment,e),_(Ra.$$.fragment,e),_(Xa.$$.fragment,e),_(Ua.$$.fragment,e),_(Ya.$$.fragment,e),_(Ga.$$.fragment,e),_(Ja.$$.fragment,e),_(Ka.$$.fragment,e),Ic=!0)},o(e){g(O.$$.fragment,e),g(xn.$$.fragment,e),g($n.$$.fragment,e),g(On.$$.fragment,e),g(qn.$$.fragment,e),g(Kt.$$.fragment,e),g(Fn.$$.fragment,e),g(Mn.$$.fragment,e),g(kn.$$.fragment,e),g(An.$$.fragment,e),g(Cn.$$.fragment,e),g(En.$$.fragment,e),g(Nn.$$.fragment,e),g(zn.$$.fragment,e),g(Pn.$$.fragment,e),g(Bn.$$.fragment,e),g(Ln.$$.fragment,e),g(Wn.$$.fragment,e),g(jn.$$.fragment,e),g(Dn.$$.fragment,e),g(Hn.$$.fragment,e),g(In.$$.fragment,e),g(Vn.$$.fragment,e),g(Qn.$$.fragment,e),g(Rn.$$.fragment,e),g(Xn.$$.fragment,e),g(Un.$$.fragment,e),g(Yn.$$.fragment,e),g(Gn.$$.fragment,e),g(Jn.$$.fragment,e),g(Kn.$$.fragment,e),g(Zn.$$.fragment,e),g(es.$$.fragment,e),g(ts.$$.fragment,e),g(os.$$.fragment,e),g(ns.$$.fragment,e),g
(ss.$$.fragment,e),g(as.$$.fragment,e),g(rs.$$.fragment,e),g(ds.$$.fragment,e),g(is.$$.fragment,e),g(us.$$.fragment,e),g(ls.$$.fragment,e),g(ps.$$.fragment,e),g(cs.$$.fragment,e),g(hs.$$.fragment,e),g(fs.$$.fragment,e),g(ms.$$.fragment,e),g(_s.$$.fragment,e),g(gs.$$.fragment,e),g(vs.$$.fragment,e),g(ys.$$.fragment,e),g(Ts.$$.fragment,e),g(bs.$$.fragment,e),g(ws.$$.fragment,e),g(xs.$$.fragment,e),g($s.$$.fragment,e),g(qs.$$.fragment,e),g(Fs.$$.fragment,e),g(Ss.$$.fragment,e),g(Ms.$$.fragment,e),g(ks.$$.fragment,e),g(As.$$.fragment,e),g(Cs.$$.fragment,e),g(Es.$$.fragment,e),g(Ns.$$.fragment,e),g(zs.$$.fragment,e),g(Ps.$$.fragment,e),g(Bs.$$.fragment,e),g(Ls.$$.fragment,e),g(Ws.$$.fragment,e),g(js.$$.fragment,e),g(Ds.$$.fragment,e),g(Hs.$$.fragment,e),g(Is.$$.fragment,e),g(Vs.$$.fragment,e),g(Qs.$$.fragment,e),g(Rs.$$.fragment,e),g(Xs.$$.fragment,e),g(Us.$$.fragment,e),g(Ys.$$.fragment,e),g(Gs.$$.fragment,e),g(Js.$$.fragment,e),g(Ks.$$.fragment,e),g(Zs.$$.fragment,e),g(ea.$$.fragment,e),g(ta.$$.fragment,e),g(oa.$$.fragment,e),g(na.$$.fragment,e),g(sa.$$.fragment,e),g(aa.$$.fragment,e),g(ra.$$.fragment,e),g(da.$$.fragment,e),g(ia.$$.fragment,e),g(ua.$$.fragment,e),g(la.$$.fragment,e),g(pa.$$.fragment,e),g(ca.$$.fragment,e),g(ha.$$.fragment,e),g(fa.$$.fragment,e),g(ma.$$.fragment,e),g(_a.$$.fragment,e),g(ga.$$.fragment,e),g(va.$$.fragment,e),g(ya.$$.fragment,e),g(Ta.$$.fragment,e),g(ba.$$.fragment,e),g(wa.$$.fragment,e),g(xa.$$.fragment,e),g($a.$$.fragment,e),g(Oa.$$.fragment,e),g(qa.$$.fragment,e),g(Fa.$$.fragment,e),g(Sa.$$.fragment,e),g(Ma.$$.fragment,e),g(ka.$$.fragment,e),g(Aa.$$.fragment,e),g(Ca.$$.fragment,e),g(Ea.$$.fragment,e),g(Na.$$.fragment,e),g(za.$$.fragment,e),g(Pa.$$.fragment,e),g(Ba.$$.fragment,e),g(La.$$.fragment,e),g(Wa.$$.fragment,e),g(ja.$$.fragment,e),g(Da.$$.fragment,e),g(Ha.$$.fragment,e),g(Ia.$$.fragment,e),g(Va.$$.fragment,e),g(Qa.$$.fragment,e),g(Ra.$$.fragment,e),g(Xa.$$.fragment,e),g(Ua.$$.fragment,e),g(Ya.$$.fragment,e),g(Ga.$$.fragment,e),g(Ja.$$.fragment,e),g(Ka.$$.fragment,e),Ic=!1},d(e){t(x),e&&t(Yt),e&&t($),v(O),e&&t(M),e&&t(C),e&&t($l),e&&t(or),e&&t(Ol),v(xn,e),e&&t(ql),e&&t(b),e&&t(Fl),e&&t(q),e&&t(Sl),e&&t(F),e&&t(Ml),v($n,e),e&&t(kl),e&&t(Gt),e&&t(Al),e&&t(S),e&&t(Cl),e&&t(sr),e&&t(El),e&&t(ee),v(On),e&&t(Nl),e&&t(k),v(qn),v(Kt),v(Fn),e&&t(zl),e&&t(oe),v(Mn),e&&t(Pl),e&&t(ne),v(kn),e&&t(Bl),e&&t(se),v(An),e&&t(Ll),e&&t(ae),v(Cn),e&&t(Wl),e&&t(re),v(En),e&&t(jl),e&&t(de),v(Nn),e&&t(Dl),e&&t(ie),v(zn),e&&t(Hl),e&&t(ue),v(Pn),e&&t(Il),e&&t(le),v(Bn),e&&t(Vl),e&&t(pe),v(Ln),e&&t(Ql),e&&t(ce),v(Wn),e&&t(Rl),e&&t(he),v(jn),e&&t(Xl),e&&t(fe),v(Dn),e&&t(Ul),e&&t(me),v(Hn),e&&t(Yl),e&&t(_e),v(In),e&&t(Gl),e&&t(ge),v(Vn),e&&t(Jl),e&&t(ve),v(Qn),e&&t(Kl),e&&t(ye),v(Rn),e&&t(Zl),e&&t(Te),v(Xn),e&&t(ep),e&&t(be),v(Un),e&&t(tp),e&&t(we),v(Yn),e&&t(op),e&&t(xe),v(Gn),e&&t(np),e&&t($e),v(Jn),e&&t(sp),e&&t(Oe),v(Kn),e&&t(ap),e&&t(qe),v(Zn),e&&t(rp),e&&t(Fe),v(es),e&&t(dp),e&&t(Se),v(ts),e&&t(ip),e&&t(Me),v(os),e&&t(up),e&&t(ke),v(ns),e&&t(lp),e&&t(Ae),v(ss),e&&t(pp),e&&t(Ce),v(as),e&&t(cp),e&&t(Ee),v(rs),e&&t(hp),e&&t(Ne),v(ds),e&&t(fp),e&&t(ze),v(is),e&&t(mp),e&&t(Pe),v(us),e&&t(_p),e&&t(Be),v(ls),e&&t(gp),e&&t(Le),v(ps),e&&t(vp),e&&t(We),v(cs),e&&t(yp),e&&t(je),v(hs),e&&t(Tp),e&&t(De),v(fs),e&&t(bp),e&&t(He),v(ms),e&&t(wp),e&&t(Ie),v(_s),e&&t(xp),e&&t(Ve),v(gs),e&&t($p),e&&t(Qe),v(vs),e&&t(Op),e&&t(Re),v(ys),e&&t(qp),e&&t(Xe),v(Ts),e&&t(Fp),e&&t(Ue),v(bs),e&&t(Sp),e&&t(Ye),v(ws),e&&t(Mp),e&&t(Ge),v(xs),e&&t(kp),e&&t(Je),v($s),e&&t(Ap),e&&t(Ke),v(qs),e&&t(Cp),e&&t(Ze),v(Fs
),e&&t(Ep),e&&t(et),v(Ss),e&&t(Np),e&&t(tt),v(Ms),e&&t(zp),e&&t(ot),v(ks),e&&t(Pp),e&&t(nt),v(As),e&&t(Bp),e&&t(st),v(Cs),e&&t(Lp),e&&t(at),v(Es),e&&t(Wp),e&&t(rt),v(Ns),e&&t(jp),e&&t(dt),v(zs),e&&t(Dp),e&&t(it),v(Ps),e&&t(Hp),e&&t(ut),v(Bs),e&&t(Ip),e&&t(lt),v(Ls),e&&t(Vp),e&&t(pt),v(Ws),e&&t(Qp),e&&t(ct),v(js),e&&t(Rp),e&&t(ht),v(Ds),e&&t(Xp),e&&t(ft),v(Hs),e&&t(Up),e&&t(mt),v(Is),e&&t(Yp),e&&t(_t),v(Vs),e&&t(Gp),e&&t(gt),v(Qs),e&&t(Jp),e&&t(vt),v(Rs),e&&t(Kp),e&&t(yt),v(Xs),e&&t(Zp),e&&t(Tt),v(Us),e&&t(ec),e&&t(bt),v(Ys),e&&t(tc),e&&t(wt),v(Gs),e&&t(oc),e&&t(xt),v(Js),e&&t(nc),e&&t($t),v(Ks),e&&t(sc),e&&t(Ot),v(Zs),e&&t(ac),e&&t(qt),v(ea),e&&t(rc),e&&t(Ft),v(ta),e&&t(dc),e&&t(St),v(oa),e&&t(ic),e&&t(Mt),v(na),e&&t(uc),e&&t(kt),v(sa),e&&t(lc),e&&t(At),v(aa),e&&t(pc),e&&t(Ct),v(ra),e&&t(cc),e&&t(Et),v(da),e&&t(hc),e&&t(Nt),v(ia),e&&t(fc),e&&t(E),v(ua),v(la),e&&t(mc),e&&t(zt),v(pa),e&&t(_c),e&&t(N),v(ca),v(ha),e&&t(gc),e&&t(Pt),v(fa),e&&t(vc),e&&t(z),v(ma),v(_a),e&&t(yc),e&&t(Bt),v(ga),e&&t(Tc),e&&t(P),v(va),v(ya),e&&t(bc),e&&t(Lt),v(Ta),e&&t(wc),e&&t(B),v(ba),v(wa),e&&t(xc),e&&t(Wt),v(xa),e&&t($c),e&&t(L),v($a),v(Oa),e&&t(Oc),e&&t(jt),v(qa),e&&t(qc),e&&t(W),v(Fa),v(Sa),e&&t(Fc),e&&t(Dt),v(Ma),e&&t(Sc),e&&t(j),v(ka),v(Aa),e&&t(Mc),e&&t(Ht),v(Ca),e&&t(kc),e&&t(D),v(Ea),v(Na),e&&t(Ac),e&&t(It),v(za),e&&t(Cc),e&&t(H),v(Pa),v(Ba),e&&t(Ec),e&&t(Vt),v(La),e&&t(Nc),e&&t(I),v(Wa),v(ja),e&&t(zc),e&&t(Qt),v(Da),e&&t(Pc),e&&t(V),v(Ha),v(Ia),e&&t(Bc),e&&t(Rt),v(Va),e&&t(Lc),e&&t(Q),v(Qa),v(Ra),e&&t(Wc),e&&t(Xt),v(Xa),e&&t(jc),e&&t(R),v(Ua),v(Ya),e&&t(Dc),e&&t(Ut),v(Ga),e&&t(Hc),e&&t(X),v(Ja),v(Ka)}}}const oO={local:"model-outputs",sections:[{local:"transformers.utils.ModelOutput",title:"ModelOutput"},{local:"transformers.modeling_outputs.BaseModelOutput",title:"BaseModelOutput"},{local:"transformers.modeling_outputs.BaseModelOutputWithPooling",title:"BaseModelOutputWithPooling"},{local:"transformers.modeling_outputs.BaseModelOutputWithCrossAttentions",title:"BaseModelOutputWithCrossAttentions"},{local:"transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions",title:"BaseModelOutputWithPoolingAndCrossAttentions"},{local:"transformers.modeling_outputs.BaseModelOutputWithPast",title:"BaseModelOutputWithPast"},{local:"transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions",title:"BaseModelOutputWithPastAndCrossAttentions"},{local:"transformers.modeling_outputs.Seq2SeqModelOutput",title:"Seq2SeqModelOutput"},{local:"transformers.modeling_outputs.CausalLMOutput",title:"CausalLMOutput"},{local:"transformers.modeling_outputs.CausalLMOutputWithCrossAttentions",title:"CausalLMOutputWithCrossAttentions"},{local:"transformers.modeling_outputs.CausalLMOutputWithPast",title:"CausalLMOutputWithPast"},{local:"transformers.modeling_outputs.MaskedLMOutput",title:"MaskedLMOutput"},{local:"transformers.modeling_outputs.Seq2SeqLMOutput",title:"Seq2SeqLMOutput"},{local:"transformers.modeling_outputs.NextSentencePredictorOutput",title:"NextSentencePredictorOutput"},{local:"transformers.modeling_outputs.SequenceClassifierOutput",title:"SequenceClassifierOutput"},{local:"transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput",title:"Seq2SeqSequenceClassifierOutput"},{local:"transformers.modeling_outputs.MultipleChoiceModelOutput",title:"MultipleChoiceModelOutput"},{local:"transformers.modeling_outputs.TokenClassifierOutput",title:"TokenClassifierOutput"},{local:"transformers.modeling_outputs.QuestionAnsweringModelOutput",title:"QuestionAnsweringModelOutput"},{local:"transformers.modeli
ng_outputs.Seq2SeqQuestionAnsweringModelOutput",title:"Seq2SeqQuestionAnsweringModelOutput"},{local:"transformers.modeling_outputs.SemanticSegmenterOutput",title:"SemanticSegmenterOutput"},{local:"transformers.modeling_outputs.ImageClassifierOutput",title:"ImageClassifierOutput"},{local:"transformers.modeling_outputs.ImageClassifierOutputWithNoAttention",title:"ImageClassifierOutputWithNoAttention"},{local:"transformers.modeling_outputs.DepthEstimatorOutput",title:"DepthEstimatorOutput"},{local:"transformers.modeling_outputs.Wav2Vec2BaseModelOutput",title:"Wav2Vec2BaseModelOutput"},{local:"transformers.modeling_outputs.XVectorOutput",title:"XVectorOutput"},{local:"transformers.modeling_tf_outputs.TFBaseModelOutput",title:"TFBaseModelOutput"},{local:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling",title:"TFBaseModelOutputWithPooling"},{local:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions",title:"TFBaseModelOutputWithPoolingAndCrossAttentions"},{local:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPast",title:"TFBaseModelOutputWithPast"},{local:"transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions",title:"TFBaseModelOutputWithPastAndCrossAttentions"},{local:"transformers.modeling_tf_outputs.TFSeq2SeqModelOutput",title:"TFSeq2SeqModelOutput"},{local:"transformers.modeling_tf_outputs.TFCausalLMOutput",title:"TFCausalLMOutput"},{local:"transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions",title:"TFCausalLMOutputWithCrossAttentions"},{local:"transformers.modeling_tf_outputs.TFCausalLMOutputWithPast",title:"TFCausalLMOutputWithPast"},{local:"transformers.modeling_tf_outputs.TFMaskedLMOutput",title:"TFMaskedLMOutput"},{local:"transformers.modeling_tf_outputs.TFSeq2SeqLMOutput",title:"TFSeq2SeqLMOutput"},{local:"transformers.modeling_tf_outputs.TFNextSentencePredictorOutput",title:"TFNextSentencePredictorOutput"},{local:"transformers.modeling_tf_outputs.TFSequenceClassifierOutput",title:"TFSequenceClassifierOutput"},{local:"transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput",title:"TFSeq2SeqSequenceClassifierOutput"},{local:"transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput",title:"TFMultipleChoiceModelOutput"},{local:"transformers.modeling_tf_outputs.TFTokenClassifierOutput",title:"TFTokenClassifierOutput"},{local:"transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput",title:"TFQuestionAnsweringModelOutput"},{local:"transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput",title:"TFSeq2SeqQuestionAnsweringModelOutput"},{local:"transformers.modeling_flax_outputs.FlaxBaseModelOutput",title:"FlaxBaseModelOutput"},{local:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast",title:"FlaxBaseModelOutputWithPast"},{local:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling",title:"FlaxBaseModelOutputWithPooling"},{local:"transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions",title:"FlaxBaseModelOutputWithPastAndCrossAttentions"},{local:"transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput",title:"FlaxSeq2SeqModelOutput"},{local:"transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions",title:"FlaxCausalLMOutputWithCrossAttentions"},{local:"transformers.modeling_flax_outputs.FlaxMaskedLMOutput",title:"FlaxMaskedLMOutput"},{local:"transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput",title:"FlaxSeq2SeqLMOutput"},{local:"transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput",ti
tle:"FlaxNextSentencePredictorOutput"},{local:"transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput",title:"FlaxSequenceClassifierOutput"},{local:"transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput",title:"FlaxSeq2SeqSequenceClassifierOutput"},{local:"transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput",title:"FlaxMultipleChoiceModelOutput"},{local:"transformers.modeling_flax_outputs.FlaxTokenClassifierOutput",title:"FlaxTokenClassifierOutput"},{local:"transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput",title:"FlaxQuestionAnsweringModelOutput"},{local:"transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput",title:"FlaxSeq2SeqQuestionAnsweringModelOutput"}],title:"Model outputs"};function nO(xl){return K$(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class uO extends U${constructor(x){super();Y$(this,x,nO,tO,G$,{})}}export{uO as default,oO as metadata};
10
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/callback.mdx-hf-doc-builder.js
import{S as Dg,i as xg,s as Og,e as o,k as c,w as g,t as r,M as Mg,c as s,d as t,m as d,a as l,x as u,h as a,b as i,G as e,g as m,y as _,q as b,o as v,B as E,v as Sg,L as Ig}from"../../chunks/vendor-hf-doc-builder.js";import{T as Ng}from"../../chunks/Tip-hf-doc-builder.js";import{D as w}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Jm}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as eo}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Pg}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Fg(_r){let A,z,x,O,F;return O=new Jm({props:{code:`class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs)`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">PrinterCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_log</span>(<span class="hljs-params">self, args, state, control, logs=<span class="hljs-literal">None</span>, **kwargs</span>): _ = logs.pop(<span class="hljs-string">&quot;total_flos&quot;</span>, <span class="hljs-literal">None</span>) <span class="hljs-keyword">if</span> state.is_local_process_zero: <span class="hljs-built_in">print</span>(logs)`}}),{c(){A=o("p"),z=r("Example:"),x=c(),g(O.$$.fragment)},l(y){A=s(y,"P",{});var W=l(A);z=a(W,"Example:"),W.forEach(t),x=d(y),u(O.$$.fragment,y)},m(y,W){m(y,A,W),e(A,z),m(y,x,W),_(O,y,W),F=!0},p:Ig,i(y){F||(b(O.$$.fragment,y),F=!0)},o(y){v(O.$$.fragment,y),F=!1},d(y){y&&t(A),y&&t(x),E(O,y)}}}function Wg(_r){let A,z,x,O,F,y,W,re;return{c(){A=o("p"),z=r(`In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `),x=o("code"),O=r("gradient_accumulation_steps=n"),F=r(`, then one update step requires going through `),y=o("em"),W=r("n"),re=r(" batches.")},l(ae){A=s(ae,"P",{});var j=l(A);z=a(j,`In all this class, one step is to be understood as one update step. 
When using gradient accumulation, one update step may require several forward and backward passes: if you use `),x=s(j,"CODE",{});var G=l(x);O=a(G,"gradient_accumulation_steps=n"),G.forEach(t),F=a(j,`, then one update step requires going through `),y=s(j,"EM",{});var br=l(y);W=a(br,"n"),br.forEach(t),re=a(j," batches."),j.forEach(t)},m(ae,j){m(ae,A,j),e(A,z),e(A,x),e(x,O),e(A,F),e(A,y),e(y,W),e(A,re)},d(ae){ae&&t(A)}}}function jg(_r){let A,z,x,O,F,y,W,re,ae,j,G,br,vr,ks,Ts,to,B,$s,Er,Cs,ws,kr,ys,As,Tr,Ls,Ds,ro,Te,xs,$r,Os,Ms,ao,D,Cr,wr,Ss,Is,Ns,Z,yr,Ps,Fs,Ar,Ws,js,Lr,zs,Bs,Rs,Dr,xr,Vs,Us,qs,$e,Or,Gs,Hs,nt,Js,Ys,Xs,Ce,Mr,Ks,Qs,ot,Zs,el,tl,we,Sr,rl,al,st,nl,ol,sl,ye,Ir,ll,il,lt,cl,dl,fl,Ae,Nr,ml,hl,it,pl,gl,ul,Le,Pr,_l,bl,ct,vl,El,no,I,kl,Fr,Tl,$l,Wr,Cl,wl,jr,yl,Al,zr,Ll,Dl,Br,xl,Ol,oo,ne,De,va,dt,Ml,Ea,Sl,so,xe,Il,Rr,Nl,Pl,lo,H,ft,Fl,oe,Wl,Vr,jl,zl,mt,Bl,Rl,Vl,R,ht,Ul,ka,ql,Gl,L,Hl,Ta,Jl,Yl,$a,Xl,Kl,Ca,Ql,Zl,wa,ei,ti,ya,ri,ai,Aa,ni,oi,La,si,li,Da,ii,ci,xa,di,fi,mi,pt,hi,gt,pi,gi,io,se,ut,ui,_t,_i,Ur,bi,vi,co,le,bt,Ei,vt,ki,qr,Ti,$i,fo,ie,Et,Ci,kt,wi,Gr,yi,Ai,mo,J,Tt,Li,$t,Di,Hr,xi,Oi,Mi,Y,Si,Jr,Ii,Ni,Oa,Pi,Fi,Yr,Wi,ji,ho,ce,Ct,zi,de,Bi,Xr,Ri,Vi,wt,Ui,qi,po,X,yt,Gi,fe,Hi,Kr,Ji,Yi,At,Xi,Ki,Qi,V,Lt,Zi,Dt,ec,Ma,tc,rc,ac,xt,nc,Ot,oc,sc,lc,h,ic,Sa,cc,dc,Ia,fc,mc,Na,hc,pc,Pa,gc,uc,Fa,_c,bc,Wa,vc,Ec,ja,kc,Tc,za,$c,Cc,Ba,wc,yc,Ra,Ac,Lc,Va,Dc,xc,Ua,Oc,Mc,qa,Sc,Ic,Ga,Nc,Pc,Ha,Fc,Wc,Ja,jc,zc,Ya,Bc,Rc,Xa,Vc,Uc,Ka,qc,Gc,go,K,Mt,Hc,Q,Jc,Qr,Yc,Xc,St,Kc,Qc,Qa,Zc,ed,td,ee,It,rd,Za,ad,nd,p,od,en,sd,ld,tn,id,cd,rn,dd,fd,an,md,hd,Zr,pd,gd,nn,ud,_d,on,bd,vd,sn,Ed,kd,ln,Td,$d,cn,Cd,wd,dn,yd,Ad,fn,Ld,Dd,mn,xd,Od,hn,Md,Sd,pn,Id,Nd,gn,Pd,Fd,un,Wd,jd,_n,zd,Bd,bn,Rd,Vd,uo,me,Nt,Ud,he,qd,ea,Gd,Hd,Pt,Jd,Yd,_o,pe,Ft,Xd,Wt,Kd,ta,Qd,Zd,bo,ge,jt,ef,zt,tf,Bt,rf,af,vo,ue,Oe,vn,Rt,nf,En,of,Eo,k,Vt,sf,kn,lf,cf,Ut,df,Tn,ff,mf,hf,N,pf,$n,gf,uf,Cn,_f,bf,wn,vf,Ef,yn,kf,Tf,An,$f,Cf,wf,Me,yf,Se,qt,Af,Ln,Lf,Df,Ie,Gt,xf,Dn,Of,Mf,Ne,Ht,Sf,xn,If,Nf,Pe,Jt,Pf,Yt,Ff,ra,Wf,jf,zf,Fe,Xt,Bf,On,Rf,Vf,We,Kt,Uf,Mn,qf,Gf,je,Qt,Hf,Sn,Jf,Yf,ze,Zt,Xf,In,Kf,Qf,Be,er,Zf,Nn,em,tm,Re,tr,rm,Pn,am,nm,Ve,rr,om,Fn,sm,lm,Ue,ar,im,Wn,cm,dm,qe,nr,fm,jn,mm,ko,Ge,hm,aa,pm,gm,To,or,$o,He,um,zn,_m,bm,Co,sr,wo,_e,Je,Bn,lr,vm,Rn,Em,yo,P,ir,km,be,Tm,na,$m,Cm,oa,wm,ym,Am,Ye,Lm,Xe,cr,Dm,dr,xm,Vn,Om,Mm,Sm,Ke,fr,Im,mr,Nm,Un,Pm,Fm,Ao,ve,Qe,qn,hr,Wm,Gn,jm,Lo,Ee,pr,zm,ke,Bm,sa,Rm,Vm,la,Um,qm,Do;return y=new eo({}),dt=new eo({}),ft=new w({props:{name:"class transformers.integrations.CometCallback",anchor:"transformers.integrations.CometCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L765"}}),ht=new w({props:{name:"setup",anchor:"transformers.integrations.CometCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L776"}}),ut=new w({props:{name:"class transformers.DefaultFlowCallback",anchor:"transformers.DefaultFlowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L415"}}),bt=new w({props:{name:"class transformers.PrinterCallback",anchor:"transformers.PrinterCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L513"}}),Et=new w({props:{name:"class 
transformers.ProgressCallback",anchor:"transformers.ProgressCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L465"}}),Tt=new w({props:{name:"class transformers.EarlyStoppingCallback",anchor:"transformers.EarlyStoppingCallback",parameters:[{name:"early_stopping_patience",val:": int = 1"},{name:"early_stopping_threshold",val:": typing.Optional[float] = 0.0"}],parametersDescription:[{anchor:"transformers.EarlyStoppingCallback.early_stopping_patience",description:`<strong>early_stopping_patience</strong> (<code>int</code>) &#x2014; Use with <code>metric_for_best_model</code> to stop training when the specified metric worsens for <code>early_stopping_patience</code> evaluation calls.`,name:"early_stopping_patience"},{anchor:"transformers.EarlyStoppingCallback.early_stopping_threshold(float,",description:`<strong>early_stopping_threshold(<code>float</code>,</strong> <em>optional</em>) &#x2014; Use with TrainingArguments <code>metric_for_best_model</code> and <code>early_stopping_patience</code> to denote how much the specified metric must improve to satisfy early stopping conditions. \``,name:"early_stopping_threshold(float,"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L524"}}),Ct=new w({props:{name:"class transformers.integrations.TensorBoardCallback",anchor:"transformers.integrations.TensorBoardCallback",parameters:[{name:"tb_writer",val:" = None"}],parametersDescription:[{anchor:"transformers.integrations.TensorBoardCallback.tb_writer",description:`<strong>tb_writer</strong> (<code>SummaryWriter</code>, <em>optional</em>) &#x2014; The writer to use. Will instantiate one if not set.`,name:"tb_writer"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L550"}}),yt=new w({props:{name:"class transformers.integrations.WandbCallback",anchor:"transformers.integrations.WandbCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L639"}}),Lt=new w({props:{name:"setup",anchor:"transformers.integrations.WandbCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L656"}}),Mt=new w({props:{name:"class transformers.integrations.MLflowCallback",anchor:"transformers.integrations.MLflowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L865"}}),It=new w({props:{name:"setup",anchor:"transformers.integrations.MLflowCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L884"}}),Nt=new w({props:{name:"class transformers.integrations.AzureMLCallback",anchor:"transformers.integrations.AzureMLCallback",parameters:[{name:"azureml_run",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L842"}}),Ft=new w({props:{name:"class transformers.integrations.CodeCarbonCallback",anchor:"transformers.integrations.CodeCarbonCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L1273"}}),jt=new w({props:{name:"class 
transformers.integrations.NeptuneCallback",anchor:"transformers.integrations.NeptuneCallback",parameters:[{name:"api_token",val:": typing.Optional[str] = None"},{name:"project",val:": typing.Optional[str] = None"},{name:"name",val:": typing.Optional[str] = None"},{name:"base_namespace",val:": str = 'finetuning'"},{name:"run",val:": typing.Optional[ForwardRef('Run')] = None"},{name:"log_parameters",val:": bool = True"},{name:"log_checkpoints",val:": typing.Optional[str] = None"},{name:"**neptune_run_kwargs",val:""}],parametersDescription:[{anchor:"transformers.integrations.NeptuneCallback.api_token",description:`<strong>api_token</strong> (<code>str</code>, optional) &#x2014; Neptune API token obtained upon registration. You can leave this argument out if you have saved your token to the <code>NEPTUNE_API_TOKEN</code> environment variable (strongly recommended). See full setup instructions in the <a href="https://docs.neptune.ai/getting-started/installation" rel="nofollow">docs</a>.`,name:"api_token"},{anchor:"transformers.integrations.NeptuneCallback.project",description:`<strong>project</strong> (<code>str</code>, optional) &#x2014; Name of an existing Neptune project, in the form: &#x201C;workspace-name/project-name&#x201D;. You can find and copy the name from the project Settings -&gt; Properties in Neptune. If None (default), the value of the <code>NEPTUNE_PROJECT</code> environment variable will be used.`,name:"project"},{anchor:"transformers.integrations.NeptuneCallback.name",description:"<strong>name</strong> (<code>str</code>, optional) &#x2014; Custom name for the run.",name:"name"},{anchor:"transformers.integrations.NeptuneCallback.base_namespace",description:`<strong>base_namespace</strong> (<code>str</code>, optional, defaults to &#x201C;finetuning&#x201D;) &#x2014; In the Neptune run, the root namespace that will contain all of the logged metadata.`,name:"base_namespace"},{anchor:"transformers.integrations.NeptuneCallback.log_parameters",description:`<strong>log_parameters</strong> (<code>bool</code>, optional, defaults to True) &#x2014; If True, logs all Trainer arguments and model parameters provided by the Trainer.`,name:"log_parameters"},{anchor:"transformers.integrations.NeptuneCallback.log_checkpoints",description:`<strong>log_checkpoints</strong> (<code>str</code>, optional, defaults to None) &#x2014; If &#x201C;same&#x201D;, uploads checkpoints whenever they are saved by the Trainer. If &#x201C;last&#x201D;, uploads only the most recently saved checkpoint. If &#x201C;best&#x201D;, uploads the best checkpoint (among the ones saved by the Trainer). If None, does not upload checkpoints.`,name:"log_checkpoints"},{anchor:"transformers.integrations.NeptuneCallback.run",description:`<strong>run</strong> (<code>Run</code>, optional) &#x2014; Pass a Neptune run object if you want to continue logging to an existing run. 
Read more about resuming runs in the <a href="https://docs.neptune.ai/how-to-guides/neptune-api/resume-run" rel="nofollow">docs</a>.`,name:"run"},{anchor:"transformers.integrations.NeptuneCallback.*neptune_run_kwargs",description:`*<strong>*neptune_run_kwargs</strong> (optional) &#x2014; Additional keyword arguments to be passed directly to the <a href="https://docs.neptune.ai/api-reference/neptune#.init_run" rel="nofollow">neptune.init_run()</a> function when a new run is created.`,name:"*neptune_run_kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L1012"}}),Rt=new eo({}),Vt=new w({props:{name:"class transformers.TrainerCallback",anchor:"transformers.TrainerCallback",parameters:[],parametersDescription:[{anchor:"transformers.TrainerCallback.args",description:`<strong>args</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>) &#x2014; The training arguments used to instantiate the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"args"},{anchor:"transformers.TrainerCallback.state",description:`<strong>state</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState">TrainerState</a>) &#x2014; The current state of the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"state"},{anchor:"transformers.TrainerCallback.control",description:`<strong>control</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>) &#x2014; The object that is returned to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> and can be used to make some decisions.`,name:"control"},{anchor:"transformers.TrainerCallback.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>) &#x2014; The model being trained.`,name:"model"},{anchor:"transformers.TrainerCallback.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.TrainerCallback.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer used for the training steps.`,name:"optimizer"},{anchor:"transformers.TrainerCallback.lr_scheduler",description:`<strong>lr_scheduler</strong> (<code>torch.optim.lr_scheduler.LambdaLR</code>) &#x2014; The scheduler used for setting the learning rate.`,name:"lr_scheduler"},{anchor:"transformers.TrainerCallback.train_dataloader",description:`<strong>train_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"train_dataloader"},{anchor:"transformers.TrainerCallback.eval_dataloader",description:`<strong>eval_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"eval_dataloader"},{anchor:"transformers.TrainerCallback.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics computed by the last evaluation phase.</p> <p>Those are only accessible in the event 
<code>on_evaluate</code>.`,name:"metrics"},{anchor:"transformers.TrainerCallback.logs",description:`<strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.</p> <p>Those are only accessible in the event <code>on_log</code>.`,name:"logs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L159"}}),Me=new Pg({props:{anchor:"transformers.TrainerCallback.example",$$slots:{default:[Fg]},$$scope:{ctx:_r}}}),qt=new w({props:{name:"on_epoch_begin",anchor:"transformers.TrainerCallback.on_epoch_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L227"}}),Gt=new w({props:{name:"on_epoch_end",anchor:"transformers.TrainerCallback.on_epoch_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L233"}}),Ht=new w({props:{name:"on_evaluate",anchor:"transformers.TrainerCallback.on_evaluate",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L259"}}),Jt=new w({props:{name:"on_init_end",anchor:"transformers.TrainerCallback.on_init_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L209"}}),Xt=new w({props:{name:"on_log",anchor:"transformers.TrainerCallback.on_log",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L277"}}),Kt=new w({props:{name:"on_predict",anchor:"transformers.TrainerCallback.on_predict",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"metrics",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L265"}}),Qt=new w({props:{name:"on_prediction_step",anchor:"transformers.TrainerCallback.on_prediction_step",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L283"}}),Zt=new w({props:{name:"on_save",anchor:"transformers.TrainerCallback.on_save",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L271"}}),er=new w({props:{name:"on_step_begin",anchor:"transformers.TrainerCallback.on_step_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": 
TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L239"}}),tr=new w({props:{name:"on_step_end",anchor:"transformers.TrainerCallback.on_step_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L252"}}),rr=new w({props:{name:"on_substep_end",anchor:"transformers.TrainerCallback.on_substep_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L246"}}),ar=new w({props:{name:"on_train_begin",anchor:"transformers.TrainerCallback.on_train_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L215"}}),nr=new w({props:{name:"on_train_end",anchor:"transformers.TrainerCallback.on_train_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L221"}}),or=new Jm({props:{code:`class MyCallback(TrainerCallback): "A callback that prints a message at the beginning of training" def on_train_begin(self, args, state, control, **kwargs): print("Starting training") trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback()) )`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-string">&quot;A callback that prints a message at the beginning of training&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_train_begin</span>(<span class="hljs-params">self, args, state, control, **kwargs</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Starting training&quot;</span>) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], <span class="hljs-comment"># We can either pass the callback class this way or an instance of it (MyCallback())</span> )`}}),sr=new Jm({props:{code:`trainer = Trainer(...) trainer.add_callback(MyCallback) # Alternatively, we can pass an instance of the callback class trainer.add_callback(MyCallback())`,highlighted:`trainer = Trainer(...) 
trainer.add_callback(MyCallback) <span class="hljs-comment"># Alternatively, we can pass an instance of the callback class</span> trainer.add_callback(MyCallback())`}}),lr=new eo({}),ir=new w({props:{name:"class transformers.TrainerState",anchor:"transformers.TrainerState",parameters:[{name:"epoch",val:": typing.Optional[float] = None"},{name:"global_step",val:": int = 0"},{name:"max_steps",val:": int = 0"},{name:"num_train_epochs",val:": int = 0"},{name:"total_flos",val:": float = 0"},{name:"log_history",val:": typing.List[typing.Dict[str, float]] = None"},{name:"best_metric",val:": typing.Optional[float] = None"},{name:"best_model_checkpoint",val:": typing.Optional[str] = None"},{name:"is_local_process_zero",val:": bool = True"},{name:"is_world_process_zero",val:": bool = True"},{name:"is_hyper_param_search",val:": bool = False"},{name:"trial_name",val:": str = None"},{name:"trial_params",val:": typing.Dict[str, typing.Union[str, float, int, bool]] = None"}],parametersDescription:[{anchor:"transformers.TrainerState.epoch",description:`<strong>epoch</strong> (<code>float</code>, <em>optional</em>) &#x2014; Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed).`,name:"epoch"},{anchor:"transformers.TrainerState.global_step",description:`<strong>global_step</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; During training, represents the number of update steps completed.`,name:"global_step"},{anchor:"transformers.TrainerState.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of update steps to do during the current training.`,name:"max_steps"},{anchor:"transformers.TrainerState.total_flos",description:`<strong>total_flos</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow).`,name:"total_flos"},{anchor:"transformers.TrainerState.log_history",description:`<strong>log_history</strong> (<code>List[Dict[str, float]]</code>, <em>optional</em>) &#x2014; The list of logs done since the beginning of training.`,name:"log_history"},{anchor:"transformers.TrainerState.best_metric",description:`<strong>best_metric</strong> (<code>float</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the best metric encountered so far.`,name:"best_metric"},{anchor:"transformers.TrainerState.best_model_checkpoint",description:`<strong>best_model_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the name of the checkpoint for the best model encountered so far.`,name:"best_model_checkpoint"},{anchor:"transformers.TrainerState.is_local_process_zero",description:`<strong>is_local_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`,name:"is_local_process_zero"},{anchor:"transformers.TrainerState.is_world_process_zero",description:`<strong>is_world_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one 
process).`,name:"is_world_process_zero"},{anchor:"transformers.TrainerState.is_hyper_param_search",description:`<strong>is_hyper_param_search</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard.`,name:"is_hyper_param_search"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L35"}}),Ye=new Ng({props:{$$slots:{default:[Wg]},$$scope:{ctx:_r}}}),cr=new w({props:{name:"load_from_json",anchor:"transformers.TrainerState.load_from_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L101"}}),fr=new w({props:{name:"save_to_json",anchor:"transformers.TrainerState.save_to_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L95"}}),hr=new eo({}),pr=new w({props:{name:"class transformers.TrainerControl",anchor:"transformers.TrainerControl",parameters:[{name:"should_training_stop",val:": bool = False"},{name:"should_epoch_stop",val:": bool = False"},{name:"should_save",val:": bool = False"},{name:"should_evaluate",val:": bool = False"},{name:"should_log",val:": bool = False"}],parametersDescription:[{anchor:"transformers.TrainerControl.should_training_stop",description:`<strong>should_training_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the training should be interrupted.</p> <p>If <code>True</code>, this variable will not be set back to <code>False</code>. The training will just stop.`,name:"should_training_stop"},{anchor:"transformers.TrainerControl.should_epoch_stop",description:`<strong>should_epoch_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the current epoch should be interrupted.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next epoch.`,name:"should_epoch_stop"},{anchor:"transformers.TrainerControl.should_save",description:`<strong>should_save</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be saved at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_save"},{anchor:"transformers.TrainerControl.should_evaluate",description:`<strong>should_evaluate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be evaluated at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_evaluate"},{anchor:"transformers.TrainerControl.should_log",description:`<strong>should_log</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the logs should be reported at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next 
step.`,name:"should_log"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L110"}}),{c(){A=o("meta"),z=c(),x=o("h1"),O=o("a"),F=o("span"),g(y.$$.fragment),W=c(),re=o("span"),ae=r("Callbacks"),j=c(),G=o("p"),br=r(`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),vr=o("a"),ks=r("Trainer"),Ts=r(` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),to=c(),B=o("p"),$s=r("Callbacks are \u201Cread only\u201D pieces of code, apart from the "),Er=o("a"),Cs=r("TrainerControl"),ws=r(` object they return, they cannot change anything in the training loop. For customizations that require changes in the training loop, you should subclass `),kr=o("a"),ys=r("Trainer"),As=r(" and override the methods you need (see "),Tr=o("a"),Ls=r("trainer"),Ds=r(" for examples)."),ro=c(),Te=o("p"),xs=r("By default a "),$r=o("a"),Os=r("Trainer"),Ms=r(" will use the following callbacks:"),ao=c(),D=o("ul"),Cr=o("li"),wr=o("a"),Ss=r("DefaultFlowCallback"),Is=r(" which handles the default behavior for logging, saving and evaluation."),Ns=c(),Z=o("li"),yr=o("a"),Ps=r("PrinterCallback"),Fs=r(" or "),Ar=o("a"),Ws=r("ProgressCallback"),js=r(` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),Lr=o("a"),zs=r("TrainingArguments"),Bs=r(`, otherwise it\u2019s the second one).`),Rs=c(),Dr=o("li"),xr=o("a"),Vs=r("TensorBoardCallback"),Us=r(` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),qs=c(),$e=o("li"),Or=o("a"),Gs=r("WandbCallback"),Hs=r(" if "),nt=o("a"),Js=r("wandb"),Ys=r(" is installed."),Xs=c(),Ce=o("li"),Mr=o("a"),Ks=r("CometCallback"),Qs=r(" if "),ot=o("a"),Zs=r("comet_ml"),el=r(" is installed."),tl=c(),we=o("li"),Sr=o("a"),rl=r("MLflowCallback"),al=r(" if "),st=o("a"),nl=r("mlflow"),ol=r(" is installed."),sl=c(),ye=o("li"),Ir=o("a"),ll=r("NeptuneCallback"),il=r(" if "),lt=o("a"),cl=r("neptune"),dl=r(" is installed."),fl=c(),Ae=o("li"),Nr=o("a"),ml=r("AzureMLCallback"),hl=r(" if "),it=o("a"),pl=r("azureml-sdk"),gl=r(` is installed.`),ul=c(),Le=o("li"),Pr=o("a"),_l=r("CodeCarbonCallback"),bl=r(" if "),ct=o("a"),vl=r("codecarbon"),El=r(` is installed.`),no=c(),I=o("p"),kl=r("The main class that implements callbacks is "),Fr=o("a"),Tl=r("TrainerCallback"),$l=r(`. It gets the `),Wr=o("a"),Cl=r("TrainingArguments"),wl=r(" used to instantiate the "),jr=o("a"),yl=r("Trainer"),Al=r(`, can access that Trainer\u2019s internal state via `),zr=o("a"),Ll=r("TrainerState"),Dl=r(`, and can take some actions on the training loop via `),Br=o("a"),xl=r("TrainerControl"),Ol=r("."),oo=c(),ne=o("h2"),De=o("a"),va=o("span"),g(dt.$$.fragment),Ml=c(),Ea=o("span"),Sl=r("Available Callbacks"),so=c(),xe=o("p"),Il=r("Here is the list of the available "),Rr=o("a"),Nl=r("TrainerCallback"),Pl=r(" in the library:"),lo=c(),H=o("div"),g(ft.$$.fragment),Fl=c(),oe=o("p"),Wl=r("A "),Vr=o("a"),jl=r("TrainerCallback"),zl=r(" that sends the logs to "),mt=o("a"),Bl=r("Comet ML"),Rl=r("."),Vl=c(),R=o("div"),g(ht.$$.fragment),Ul=c(),ka=o("p"),ql=r("Setup the optional Comet.ml integration."),Gl=c(),L=o("p"),Hl=r(`Environment: COMET_MODE (`),Ta=o("code"),Jl=r("str"),Yl=r(", "),$a=o("em"),Xl=r("optional"),Kl=r(`): Whether to create an online, offline experiment or disable Comet logging. 
Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),Ca=o("code"),Ql=r("str"),Zl=r(", "),wa=o("em"),ei=r("optional"),ti=r(`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),ya=o("code"),ri=r("str"),ai=r(", "),Aa=o("em"),ni=r("optional"),oi=r(`): Folder to use for saving offline experiments when `),La=o("code"),si=r("COMET_MODE"),li=r(` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),Da=o("code"),ii=r("str"),ci=r(", "),xa=o("em"),di=r("optional"),fi=r(`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. Defaults to \u201CTRUE\u201D.`),mi=c(),pt=o("p"),hi=r(`For a number of configurable items in the environment, see `),gt=o("a"),pi=r("here"),gi=r("."),io=c(),se=o("div"),g(ut.$$.fragment),ui=c(),_t=o("p"),_i=r("A "),Ur=o("a"),bi=r("TrainerCallback"),vi=r(" that handles the default flow of the training loop for logs, evaluation and checkpoints."),co=c(),le=o("div"),g(bt.$$.fragment),Ei=c(),vt=o("p"),ki=r("A bare "),qr=o("a"),Ti=r("TrainerCallback"),$i=r(" that just prints the logs."),fo=c(),ie=o("div"),g(Et.$$.fragment),Ci=c(),kt=o("p"),wi=r("A "),Gr=o("a"),yi=r("TrainerCallback"),Ai=r(" that displays the progress of training or evaluation."),mo=c(),J=o("div"),g(Tt.$$.fragment),Li=c(),$t=o("p"),Di=r("A "),Hr=o("a"),xi=r("TrainerCallback"),Oi=r(" that handles early stopping."),Mi=c(),Y=o("p"),Si=r("This callback depends on "),Jr=o("a"),Ii=r("TrainingArguments"),Ni=r(" argument "),Oa=o("em"),Pi=r("load_best_model_at_end"),Fi=r(` functionality to set best_metric in `),Yr=o("a"),Wi=r("TrainerState"),ji=r("."),ho=c(),ce=o("div"),g(Ct.$$.fragment),zi=c(),de=o("p"),Bi=r("A "),Xr=o("a"),Ri=r("TrainerCallback"),Vi=r(" that sends the logs to "),wt=o("a"),Ui=r("TensorBoard"),qi=r("."),po=c(),X=o("div"),g(yt.$$.fragment),Gi=c(),fe=o("p"),Hi=r("A "),Kr=o("a"),Ji=r("TrainerCallback"),Yi=r(" that sends the logs to "),At=o("a"),Xi=r("Weight and Biases"),Ki=r("."),Qi=c(),V=o("div"),g(Lt.$$.fragment),Zi=c(),Dt=o("p"),ec=r("Setup the optional Weights & Biases ("),Ma=o("em"),tc=r("wandb"),rc=r(") integration."),ac=c(),xt=o("p"),nc=r(`One can subclass and override this method to customize the setup if needed. Find more information `),Ot=o("a"),oc=r("here"),sc=r(`. You can also override the following environment variables:`),lc=c(),h=o("p"),ic=r(`Environment: WANDB_LOG_MODEL (`),Sa=o("code"),cc=r("bool"),dc=r(", "),Ia=o("em"),fc=r("optional"),mc=r(", defaults to "),Na=o("code"),hc=r("False"),pc=r(`): Whether or not to log model as artifact at the end of training. Use along with `),Pa=o("em"),gc=r("TrainingArguments.load_best_model_at_end"),uc=r(` to upload best model. WANDB_WATCH (`),Fa=o("code"),_c=r("str"),bc=r(", "),Wa=o("em"),vc=r("optional"),Ec=r(" defaults to "),ja=o("code"),kc=r('"gradients"'),Tc=r(`): Can be `),za=o("code"),$c=r('"gradients"'),Cc=r(", "),Ba=o("code"),wc=r('"all"'),yc=r(" or "),Ra=o("code"),Ac=r('"false"'),Lc=r(". Set to "),Va=o("code"),Dc=r('"false"'),xc=r(" to disable gradient logging or "),Ua=o("code"),Oc=r('"all"'),Mc=r(` to log gradients and parameters. WANDB_PROJECT (`),qa=o("code"),Sc=r("str"),Ic=r(", "),Ga=o("em"),Nc=r("optional"),Pc=r(", defaults to "),Ha=o("code"),Fc=r('"huggingface"'),Wc=r(`): Set this to a custom string to store results in a different project. 
WANDB_DISABLED (`),Ja=o("code"),jc=r("bool"),zc=r(", "),Ya=o("em"),Bc=r("optional"),Rc=r(", defaults to "),Xa=o("code"),Vc=r("False"),Uc=r(`): Whether or not to disable wandb entirely. Set `),Ka=o("em"),qc=r("WANDB_DISABLED=true"),Gc=r(" to disable."),go=c(),K=o("div"),g(Mt.$$.fragment),Hc=c(),Q=o("p"),Jc=r("A "),Qr=o("a"),Yc=r("TrainerCallback"),Xc=r(" that sends the logs to "),St=o("a"),Kc=r("MLflow"),Qc=r(`. Can be disabled by setting environment variable `),Qa=o("code"),Zc=r("DISABLE_MLFLOW_INTEGRATION = TRUE"),ed=r("."),td=c(),ee=o("div"),g(It.$$.fragment),rd=c(),Za=o("p"),ad=r("Setup the optional MLflow integration."),nd=c(),p=o("p"),od=r(`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),en=o("code"),sd=r("str"),ld=r(", "),tn=o("em"),id=r("optional"),cd=r(`): Whether to use MLflow .log_artifact() facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `),rn=o("code"),dd=r("True"),fd=r(" or "),an=o("em"),md=r("1"),hd=r(`, will copy each saved checkpoint on each save in `),Zr=o("a"),pd=r("TrainingArguments"),gd=r("\u2019s "),nn=o("code"),ud=r("output_dir"),_d=r(` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location. MLFLOW_EXPERIMENT_NAME (`),on=o("code"),bd=r("str"),vd=r(", "),sn=o("em"),Ed=r("optional"),kd=r(`): Whether to use an MLflow experiment_name under which to launch the run. Default to \u201CNone\u201D which will point to the \u201CDefault\u201D experiment in MLflow. Otherwise, it is a case sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. MLFLOW_TAGS (`),ln=o("code"),Td=r("str"),$d=r(", "),cn=o("em"),Cd=r("optional"),wd=r(`): A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: os.environ[\u2018MLFLOW_TAGS\u2019]=\u2019{\u201Crelease.candidate\u201D: \u201CRC1\u201D, \u201Crelease.version\u201D: \u201C2.2.0\u201D}\u2019 MLFLOW_NESTED_RUN (`),dn=o("code"),yd=r("str"),Ad=r(", "),fn=o("em"),Ld=r("optional"),Dd=r(`): Whether to use MLflow nested runs. If set to `),mn=o("code"),xd=r("True"),Od=r(" or "),hn=o("em"),Md=r("1"),Sd=r(`, will create a nested run inside the current run. MLFLOW_RUN_ID (`),pn=o("code"),Id=r("str"),Nd=r(", "),gn=o("em"),Pd=r("optional"),Fd=r(`): Allow to reattach to an existing run which can be usefull when resuming training from a checkpoint. When MLFLOW_RUN_ID environment variable is set, start_run attempts to resume a run with the specified run ID and other parameters are ignored. MLFLOW_FLATTEN_PARAMS (`),un=o("code"),Wd=r("str"),jd=r(", "),_n=o("em"),zd=r("optional"),Bd=r(`): Whether to flatten the parameters dictionary before logging. Default to `),bn=o("code"),Rd=r("False"),Vd=r("."),uo=c(),me=o("div"),g(Nt.$$.fragment),Ud=c(),he=o("p"),qd=r("A "),ea=o("a"),Gd=r("TrainerCallback"),Hd=r(" that sends the logs to "),Pt=o("a"),Jd=r("AzureML"),Yd=r("."),_o=c(),pe=o("div"),g(Ft.$$.fragment),Xd=c(),Wt=o("p"),Kd=r("A "),ta=o("a"),Qd=r("TrainerCallback"),Zd=r(" that tracks the CO2 emission of training."),bo=c(),ge=o("div"),g(jt.$$.fragment),ef=c(),zt=o("p"),tf=r("TrainerCallback that sends the logs to "),Bt=o("a"),rf=r("Neptune"),af=r("."),vo=c(),ue=o("h2"),Oe=o("a"),vn=o("span"),g(Rt.$$.fragment),nf=c(),En=o("span"),of=r("TrainerCallback"),Eo=c(),k=o("div"),g(Vt.$$.fragment),sf=c(),kn=o("p"),lf=r(`A class for objects that will inspect the state of the training loop at some events and take some decisions. 
At each of those events the following arguments are available:`),cf=c(),Ut=o("p"),df=r("The "),Tn=o("code"),ff=r("control"),mf=r(` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),hf=c(),N=o("p"),pf=r("The argument "),$n=o("code"),gf=r("args"),uf=r(", "),Cn=o("code"),_f=r("state"),bf=r(" and "),wn=o("code"),vf=r("control"),Ef=r(" are positionals for all events, all the others are grouped in "),yn=o("code"),kf=r("kwargs"),Tf=r(`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple `),An=o("code"),$f=r("~transformer.PrinterCallback"),Cf=r("."),wf=c(),g(Me.$$.fragment),yf=c(),Se=o("div"),g(qt.$$.fragment),Af=c(),Ln=o("p"),Lf=r("Event called at the beginning of an epoch."),Df=c(),Ie=o("div"),g(Gt.$$.fragment),xf=c(),Dn=o("p"),Of=r("Event called at the end of an epoch."),Mf=c(),Ne=o("div"),g(Ht.$$.fragment),Sf=c(),xn=o("p"),If=r("Event called after an evaluation phase."),Nf=c(),Pe=o("div"),g(Jt.$$.fragment),Pf=c(),Yt=o("p"),Ff=r("Event called at the end of the initialization of the "),ra=o("a"),Wf=r("Trainer"),jf=r("."),zf=c(),Fe=o("div"),g(Xt.$$.fragment),Bf=c(),On=o("p"),Rf=r("Event called after logging the last logs."),Vf=c(),We=o("div"),g(Kt.$$.fragment),Uf=c(),Mn=o("p"),qf=r("Event called after a successful prediction."),Gf=c(),je=o("div"),g(Qt.$$.fragment),Hf=c(),Sn=o("p"),Jf=r("Event called after a prediction step."),Yf=c(),ze=o("div"),g(Zt.$$.fragment),Xf=c(),In=o("p"),Kf=r("Event called after a checkpoint save."),Qf=c(),Be=o("div"),g(er.$$.fragment),Zf=c(),Nn=o("p"),em=r(`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),tm=c(),Re=o("div"),g(tr.$$.fragment),rm=c(),Pn=o("p"),am=r(`Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs.`),nm=c(),Ve=o("div"),g(rr.$$.fragment),om=c(),Fn=o("p"),sm=r("Event called at the end of an substep during gradient accumulation."),lm=c(),Ue=o("div"),g(ar.$$.fragment),im=c(),Wn=o("p"),cm=r("Event called at the beginning of training."),dm=c(),qe=o("div"),g(nr.$$.fragment),fm=c(),jn=o("p"),mm=r("Event called at the end of training."),ko=c(),Ge=o("p"),hm=r("Here is an example of how to register a custom callback with the PyTorch "),aa=o("a"),pm=r("Trainer"),gm=r(":"),To=c(),g(or.$$.fragment),$o=c(),He=o("p"),um=r("Another way to register a callback is to call "),zn=o("code"),_m=r("trainer.add_callback()"),bm=r(" as follows:"),Co=c(),g(sr.$$.fragment),wo=c(),_e=o("h2"),Je=o("a"),Bn=o("span"),g(lr.$$.fragment),vm=c(),Rn=o("span"),Em=r("TrainerState"),yo=c(),P=o("div"),g(ir.$$.fragment),km=c(),be=o("p"),Tm=r("A class containing the "),na=o("a"),$m=r("Trainer"),Cm=r(` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),oa=o("a"),wm=r("TrainerCallback"),ym=r("."),Am=c(),g(Ye.$$.fragment),Lm=c(),Xe=o("div"),g(cr.$$.fragment),Dm=c(),dr=o("p"),xm=r("Create an instance from the content of "),Vn=o("code"),Om=r("json_path"),Mm=r("."),Sm=c(),Ke=o("div"),g(fr.$$.fragment),Im=c(),mr=o("p"),Nm=r("Save the content of this instance in JSON format inside "),Un=o("code"),Pm=r("json_path"),Fm=r("."),Ao=c(),ve=o("h2"),Qe=o("a"),qn=o("span"),g(hr.$$.fragment),Wm=c(),Gn=o("span"),jm=r("TrainerControl"),Lo=c(),Ee=o("div"),g(pr.$$.fragment),zm=c(),ke=o("p"),Bm=r("A class that handles the "),sa=o("a"),Rm=r("Trainer"),Vm=r(" control flow. 
This class is used by the "),la=o("a"),Um=r("TrainerCallback"),qm=r(` to activate some switches in the training loop.`),this.h()},l(n){const f=Mg('[data-svelte="svelte-1phssyn"]',document.head);A=s(f,"META",{name:!0,content:!0}),f.forEach(t),z=d(n),x=s(n,"H1",{class:!0});var gr=l(x);O=s(gr,"A",{id:!0,class:!0,href:!0});var Hn=l(O);F=s(Hn,"SPAN",{});var Ym=l(F);u(y.$$.fragment,Ym),Ym.forEach(t),Hn.forEach(t),W=d(gr),re=s(gr,"SPAN",{});var Xm=l(re);ae=a(Xm,"Callbacks"),Xm.forEach(t),gr.forEach(t),j=d(n),G=s(n,"P",{});var xo=l(G);br=a(xo,`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),vr=s(xo,"A",{href:!0});var Km=l(vr);ks=a(Km,"Trainer"),Km.forEach(t),Ts=a(xo,` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),xo.forEach(t),to=d(n),B=s(n,"P",{});var Ze=l(B);$s=a(Ze,"Callbacks are \u201Cread only\u201D pieces of code, apart from the "),Er=s(Ze,"A",{href:!0});var Qm=l(Er);Cs=a(Qm,"TrainerControl"),Qm.forEach(t),ws=a(Ze,` object they return, they cannot change anything in the training loop. For customizations that require changes in the training loop, you should subclass `),kr=s(Ze,"A",{href:!0});var Zm=l(kr);ys=a(Zm,"Trainer"),Zm.forEach(t),As=a(Ze," and override the methods you need (see "),Tr=s(Ze,"A",{href:!0});var eh=l(Tr);Ls=a(eh,"trainer"),eh.forEach(t),Ds=a(Ze," for examples)."),Ze.forEach(t),ro=d(n),Te=s(n,"P",{});var Oo=l(Te);xs=a(Oo,"By default a "),$r=s(Oo,"A",{href:!0});var th=l($r);Os=a(th,"Trainer"),th.forEach(t),Ms=a(Oo," will use the following callbacks:"),Oo.forEach(t),ao=d(n),D=s(n,"UL",{});var S=l(D);Cr=s(S,"LI",{});var Gm=l(Cr);wr=s(Gm,"A",{href:!0});var rh=l(wr);Ss=a(rh,"DefaultFlowCallback"),rh.forEach(t),Is=a(Gm," which handles the default behavior for logging, saving and evaluation."),Gm.forEach(t),Ns=d(S),Z=s(S,"LI",{});var ur=l(Z);yr=s(ur,"A",{href:!0});var ah=l(yr);Ps=a(ah,"PrinterCallback"),ah.forEach(t),Fs=a(ur," or "),Ar=s(ur,"A",{href:!0});var nh=l(Ar);Ws=a(nh,"ProgressCallback"),nh.forEach(t),js=a(ur,` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),Lr=s(ur,"A",{href:!0});var oh=l(Lr);zs=a(oh,"TrainingArguments"),oh.forEach(t),Bs=a(ur,`, otherwise it\u2019s the second one).`),ur.forEach(t),Rs=d(S),Dr=s(S,"LI",{});var Hm=l(Dr);xr=s(Hm,"A",{href:!0});var sh=l(xr);Vs=a(sh,"TensorBoardCallback"),sh.forEach(t),Us=a(Hm,` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),Hm.forEach(t),qs=d(S),$e=s(S,"LI",{});var Jn=l($e);Or=s(Jn,"A",{href:!0});var lh=l(Or);Gs=a(lh,"WandbCallback"),lh.forEach(t),Hs=a(Jn," if "),nt=s(Jn,"A",{href:!0,rel:!0});var ih=l(nt);Js=a(ih,"wandb"),ih.forEach(t),Ys=a(Jn," is installed."),Jn.forEach(t),Xs=d(S),Ce=s(S,"LI",{});var Yn=l(Ce);Mr=s(Yn,"A",{href:!0});var ch=l(Mr);Ks=a(ch,"CometCallback"),ch.forEach(t),Qs=a(Yn," if "),ot=s(Yn,"A",{href:!0,rel:!0});var dh=l(ot);Zs=a(dh,"comet_ml"),dh.forEach(t),el=a(Yn," is installed."),Yn.forEach(t),tl=d(S),we=s(S,"LI",{});var Xn=l(we);Sr=s(Xn,"A",{href:!0});var fh=l(Sr);rl=a(fh,"MLflowCallback"),fh.forEach(t),al=a(Xn," if "),st=s(Xn,"A",{href:!0,rel:!0});var mh=l(st);nl=a(mh,"mlflow"),mh.forEach(t),ol=a(Xn," is installed."),Xn.forEach(t),sl=d(S),ye=s(S,"LI",{});var Kn=l(ye);Ir=s(Kn,"A",{href:!0});var hh=l(Ir);ll=a(hh,"NeptuneCallback"),hh.forEach(t),il=a(Kn," if "),lt=s(Kn,"A",{href:!0,rel:!0});var 
ph=l(lt);cl=a(ph,"neptune"),ph.forEach(t),dl=a(Kn," is installed."),Kn.forEach(t),fl=d(S),Ae=s(S,"LI",{});var Qn=l(Ae);Nr=s(Qn,"A",{href:!0});var gh=l(Nr);ml=a(gh,"AzureMLCallback"),gh.forEach(t),hl=a(Qn," if "),it=s(Qn,"A",{href:!0,rel:!0});var uh=l(it);pl=a(uh,"azureml-sdk"),uh.forEach(t),gl=a(Qn,` is installed.`),Qn.forEach(t),ul=d(S),Le=s(S,"LI",{});var Zn=l(Le);Pr=s(Zn,"A",{href:!0});var _h=l(Pr);_l=a(_h,"CodeCarbonCallback"),_h.forEach(t),bl=a(Zn," if "),ct=s(Zn,"A",{href:!0,rel:!0});var bh=l(ct);vl=a(bh,"codecarbon"),bh.forEach(t),El=a(Zn,` is installed.`),Zn.forEach(t),S.forEach(t),no=d(n),I=s(n,"P",{});var U=l(I);kl=a(U,"The main class that implements callbacks is "),Fr=s(U,"A",{href:!0});var vh=l(Fr);Tl=a(vh,"TrainerCallback"),vh.forEach(t),$l=a(U,`. It gets the `),Wr=s(U,"A",{href:!0});var Eh=l(Wr);Cl=a(Eh,"TrainingArguments"),Eh.forEach(t),wl=a(U," used to instantiate the "),jr=s(U,"A",{href:!0});var kh=l(jr);yl=a(kh,"Trainer"),kh.forEach(t),Al=a(U,`, can access that Trainer\u2019s internal state via `),zr=s(U,"A",{href:!0});var Th=l(zr);Ll=a(Th,"TrainerState"),Th.forEach(t),Dl=a(U,`, and can take some actions on the training loop via `),Br=s(U,"A",{href:!0});var $h=l(Br);xl=a($h,"TrainerControl"),$h.forEach(t),Ol=a(U,"."),U.forEach(t),oo=d(n),ne=s(n,"H2",{class:!0});var Mo=l(ne);De=s(Mo,"A",{id:!0,class:!0,href:!0});var Ch=l(De);va=s(Ch,"SPAN",{});var wh=l(va);u(dt.$$.fragment,wh),wh.forEach(t),Ch.forEach(t),Ml=d(Mo),Ea=s(Mo,"SPAN",{});var yh=l(Ea);Sl=a(yh,"Available Callbacks"),yh.forEach(t),Mo.forEach(t),so=d(n),xe=s(n,"P",{});var So=l(xe);Il=a(So,"Here is the list of the available "),Rr=s(So,"A",{href:!0});var Ah=l(Rr);Nl=a(Ah,"TrainerCallback"),Ah.forEach(t),Pl=a(So," in the library:"),So.forEach(t),lo=d(n),H=s(n,"DIV",{class:!0});var ia=l(H);u(ft.$$.fragment,ia),Fl=d(ia),oe=s(ia,"P",{});var ca=l(oe);Wl=a(ca,"A "),Vr=s(ca,"A",{href:!0});var Lh=l(Vr);jl=a(Lh,"TrainerCallback"),Lh.forEach(t),zl=a(ca," that sends the logs to "),mt=s(ca,"A",{href:!0,rel:!0});var Dh=l(mt);Bl=a(Dh,"Comet ML"),Dh.forEach(t),Rl=a(ca,"."),ca.forEach(t),Vl=d(ia),R=s(ia,"DIV",{class:!0});var et=l(R);u(ht.$$.fragment,et),Ul=d(et),ka=s(et,"P",{});var xh=l(ka);ql=a(xh,"Setup the optional Comet.ml integration."),xh.forEach(t),Gl=d(et),L=s(et,"P",{});var M=l(L);Hl=a(M,`Environment: COMET_MODE (`),Ta=s(M,"CODE",{});var Oh=l(Ta);Jl=a(Oh,"str"),Oh.forEach(t),Yl=a(M,", "),$a=s(M,"EM",{});var Mh=l($a);Xl=a(Mh,"optional"),Mh.forEach(t),Kl=a(M,`): Whether to create an online, offline experiment or disable Comet logging. Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),Ca=s(M,"CODE",{});var Sh=l(Ca);Ql=a(Sh,"str"),Sh.forEach(t),Zl=a(M,", "),wa=s(M,"EM",{});var Ih=l(wa);ei=a(Ih,"optional"),Ih.forEach(t),ti=a(M,`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),ya=s(M,"CODE",{});var Nh=l(ya);ri=a(Nh,"str"),Nh.forEach(t),ai=a(M,", "),Aa=s(M,"EM",{});var Ph=l(Aa);ni=a(Ph,"optional"),Ph.forEach(t),oi=a(M,`): Folder to use for saving offline experiments when `),La=s(M,"CODE",{});var Fh=l(La);si=a(Fh,"COMET_MODE"),Fh.forEach(t),li=a(M,` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),Da=s(M,"CODE",{});var Wh=l(Da);ii=a(Wh,"str"),Wh.forEach(t),ci=a(M,", "),xa=s(M,"EM",{});var jh=l(xa);di=a(jh,"optional"),jh.forEach(t),fi=a(M,`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. 
Defaults to \u201CTRUE\u201D.`),M.forEach(t),mi=d(et),pt=s(et,"P",{});var Io=l(pt);hi=a(Io,`For a number of configurable items in the environment, see `),gt=s(Io,"A",{href:!0,rel:!0});var zh=l(gt);pi=a(zh,"here"),zh.forEach(t),gi=a(Io,"."),Io.forEach(t),et.forEach(t),ia.forEach(t),io=d(n),se=s(n,"DIV",{class:!0});var No=l(se);u(ut.$$.fragment,No),ui=d(No),_t=s(No,"P",{});var Po=l(_t);_i=a(Po,"A "),Ur=s(Po,"A",{href:!0});var Bh=l(Ur);bi=a(Bh,"TrainerCallback"),Bh.forEach(t),vi=a(Po," that handles the default flow of the training loop for logs, evaluation and checkpoints."),Po.forEach(t),No.forEach(t),co=d(n),le=s(n,"DIV",{class:!0});var Fo=l(le);u(bt.$$.fragment,Fo),Ei=d(Fo),vt=s(Fo,"P",{});var Wo=l(vt);ki=a(Wo,"A bare "),qr=s(Wo,"A",{href:!0});var Rh=l(qr);Ti=a(Rh,"TrainerCallback"),Rh.forEach(t),$i=a(Wo," that just prints the logs."),Wo.forEach(t),Fo.forEach(t),fo=d(n),ie=s(n,"DIV",{class:!0});var jo=l(ie);u(Et.$$.fragment,jo),Ci=d(jo),kt=s(jo,"P",{});var zo=l(kt);wi=a(zo,"A "),Gr=s(zo,"A",{href:!0});var Vh=l(Gr);yi=a(Vh,"TrainerCallback"),Vh.forEach(t),Ai=a(zo," that displays the progress of training or evaluation."),zo.forEach(t),jo.forEach(t),mo=d(n),J=s(n,"DIV",{class:!0});var da=l(J);u(Tt.$$.fragment,da),Li=d(da),$t=s(da,"P",{});var Bo=l($t);Di=a(Bo,"A "),Hr=s(Bo,"A",{href:!0});var Uh=l(Hr);xi=a(Uh,"TrainerCallback"),Uh.forEach(t),Oi=a(Bo," that handles early stopping."),Bo.forEach(t),Mi=d(da),Y=s(da,"P",{});var tt=l(Y);Si=a(tt,"This callback depends on "),Jr=s(tt,"A",{href:!0});var qh=l(Jr);Ii=a(qh,"TrainingArguments"),qh.forEach(t),Ni=a(tt," argument "),Oa=s(tt,"EM",{});var Gh=l(Oa);Pi=a(Gh,"load_best_model_at_end"),Gh.forEach(t),Fi=a(tt,` functionality to set best_metric in `),Yr=s(tt,"A",{href:!0});var Hh=l(Yr);Wi=a(Hh,"TrainerState"),Hh.forEach(t),ji=a(tt,"."),tt.forEach(t),da.forEach(t),ho=d(n),ce=s(n,"DIV",{class:!0});var Ro=l(ce);u(Ct.$$.fragment,Ro),zi=d(Ro),de=s(Ro,"P",{});var fa=l(de);Bi=a(fa,"A "),Xr=s(fa,"A",{href:!0});var Jh=l(Xr);Ri=a(Jh,"TrainerCallback"),Jh.forEach(t),Vi=a(fa," that sends the logs to "),wt=s(fa,"A",{href:!0,rel:!0});var Yh=l(wt);Ui=a(Yh,"TensorBoard"),Yh.forEach(t),qi=a(fa,"."),fa.forEach(t),Ro.forEach(t),po=d(n),X=s(n,"DIV",{class:!0});var ma=l(X);u(yt.$$.fragment,ma),Gi=d(ma),fe=s(ma,"P",{});var ha=l(fe);Hi=a(ha,"A "),Kr=s(ha,"A",{href:!0});var Xh=l(Kr);Ji=a(Xh,"TrainerCallback"),Xh.forEach(t),Yi=a(ha," that sends the logs to "),At=s(ha,"A",{href:!0,rel:!0});var Kh=l(At);Xi=a(Kh,"Weight and Biases"),Kh.forEach(t),Ki=a(ha,"."),ha.forEach(t),Qi=d(ma),V=s(ma,"DIV",{class:!0});var rt=l(V);u(Lt.$$.fragment,rt),Zi=d(rt),Dt=s(rt,"P",{});var Vo=l(Dt);ec=a(Vo,"Setup the optional Weights & Biases ("),Ma=s(Vo,"EM",{});var Qh=l(Ma);tc=a(Qh,"wandb"),Qh.forEach(t),rc=a(Vo,") integration."),Vo.forEach(t),ac=d(rt),xt=s(rt,"P",{});var Uo=l(xt);nc=a(Uo,`One can subclass and override this method to customize the setup if needed. Find more information `),Ot=s(Uo,"A",{href:!0,rel:!0});var Zh=l(Ot);oc=a(Zh,"here"),Zh.forEach(t),sc=a(Uo,`. You can also override the following environment variables:`),Uo.forEach(t),lc=d(rt),h=s(rt,"P",{});var T=l(h);ic=a(T,`Environment: WANDB_LOG_MODEL (`),Sa=s(T,"CODE",{});var ep=l(Sa);cc=a(ep,"bool"),ep.forEach(t),dc=a(T,", "),Ia=s(T,"EM",{});var tp=l(Ia);fc=a(tp,"optional"),tp.forEach(t),mc=a(T,", defaults to "),Na=s(T,"CODE",{});var rp=l(Na);hc=a(rp,"False"),rp.forEach(t),pc=a(T,`): Whether or not to log model as artifact at the end of training. 
Use along with `),Pa=s(T,"EM",{});var ap=l(Pa);gc=a(ap,"TrainingArguments.load_best_model_at_end"),ap.forEach(t),uc=a(T,` to upload best model. WANDB_WATCH (`),Fa=s(T,"CODE",{});var np=l(Fa);_c=a(np,"str"),np.forEach(t),bc=a(T,", "),Wa=s(T,"EM",{});var op=l(Wa);vc=a(op,"optional"),op.forEach(t),Ec=a(T," defaults to "),ja=s(T,"CODE",{});var sp=l(ja);kc=a(sp,'"gradients"'),sp.forEach(t),Tc=a(T,`): Can be `),za=s(T,"CODE",{});var lp=l(za);$c=a(lp,'"gradients"'),lp.forEach(t),Cc=a(T,", "),Ba=s(T,"CODE",{});var ip=l(Ba);wc=a(ip,'"all"'),ip.forEach(t),yc=a(T," or "),Ra=s(T,"CODE",{});var cp=l(Ra);Ac=a(cp,'"false"'),cp.forEach(t),Lc=a(T,". Set to "),Va=s(T,"CODE",{});var dp=l(Va);Dc=a(dp,'"false"'),dp.forEach(t),xc=a(T," to disable gradient logging or "),Ua=s(T,"CODE",{});var fp=l(Ua);Oc=a(fp,'"all"'),fp.forEach(t),Mc=a(T,` to log gradients and parameters. WANDB_PROJECT (`),qa=s(T,"CODE",{});var mp=l(qa);Sc=a(mp,"str"),mp.forEach(t),Ic=a(T,", "),Ga=s(T,"EM",{});var hp=l(Ga);Nc=a(hp,"optional"),hp.forEach(t),Pc=a(T,", defaults to "),Ha=s(T,"CODE",{});var pp=l(Ha);Fc=a(pp,'"huggingface"'),pp.forEach(t),Wc=a(T,`): Set this to a custom string to store results in a different project. WANDB_DISABLED (`),Ja=s(T,"CODE",{});var gp=l(Ja);jc=a(gp,"bool"),gp.forEach(t),zc=a(T,", "),Ya=s(T,"EM",{});var up=l(Ya);Bc=a(up,"optional"),up.forEach(t),Rc=a(T,", defaults to "),Xa=s(T,"CODE",{});var _p=l(Xa);Vc=a(_p,"False"),_p.forEach(t),Uc=a(T,`): Whether or not to disable wandb entirely. Set `),Ka=s(T,"EM",{});var bp=l(Ka);qc=a(bp,"WANDB_DISABLED=true"),bp.forEach(t),Gc=a(T," to disable."),T.forEach(t),rt.forEach(t),ma.forEach(t),go=d(n),K=s(n,"DIV",{class:!0});var pa=l(K);u(Mt.$$.fragment,pa),Hc=d(pa),Q=s(pa,"P",{});var at=l(Q);Jc=a(at,"A "),Qr=s(at,"A",{href:!0});var vp=l(Qr);Yc=a(vp,"TrainerCallback"),vp.forEach(t),Xc=a(at," that sends the logs to "),St=s(at,"A",{href:!0,rel:!0});var Ep=l(St);Kc=a(Ep,"MLflow"),Ep.forEach(t),Qc=a(at,`. Can be disabled by setting environment variable `),Qa=s(at,"CODE",{});var kp=l(Qa);Zc=a(kp,"DISABLE_MLFLOW_INTEGRATION = TRUE"),kp.forEach(t),ed=a(at,"."),at.forEach(t),td=d(pa),ee=s(pa,"DIV",{class:!0});var ga=l(ee);u(It.$$.fragment,ga),rd=d(ga),Za=s(ga,"P",{});var Tp=l(Za);ad=a(Tp,"Setup the optional MLflow integration."),Tp.forEach(t),nd=d(ga),p=s(ga,"P",{});var $=l(p);od=a($,`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),en=s($,"CODE",{});var $p=l(en);sd=a($p,"str"),$p.forEach(t),ld=a($,", "),tn=s($,"EM",{});var Cp=l(tn);id=a(Cp,"optional"),Cp.forEach(t),cd=a($,`): Whether to use MLflow .log_artifact() facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `),rn=s($,"CODE",{});var wp=l(rn);dd=a(wp,"True"),wp.forEach(t),fd=a($," or "),an=s($,"EM",{});var yp=l(an);md=a(yp,"1"),yp.forEach(t),hd=a($,`, will copy each saved checkpoint on each save in `),Zr=s($,"A",{href:!0});var Ap=l(Zr);pd=a(Ap,"TrainingArguments"),Ap.forEach(t),gd=a($,"\u2019s "),nn=s($,"CODE",{});var Lp=l(nn);ud=a(Lp,"output_dir"),Lp.forEach(t),_d=a($,` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location. MLFLOW_EXPERIMENT_NAME (`),on=s($,"CODE",{});var Dp=l(on);bd=a(Dp,"str"),Dp.forEach(t),vd=a($,", "),sn=s($,"EM",{});var xp=l(sn);Ed=a(xp,"optional"),xp.forEach(t),kd=a($,`): Whether to use an MLflow experiment_name under which to launch the run. Default to \u201CNone\u201D which will point to the \u201CDefault\u201D experiment in MLflow. 
Otherwise, it is a case sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. MLFLOW_TAGS (`),ln=s($,"CODE",{});var Op=l(ln);Td=a(Op,"str"),Op.forEach(t),$d=a($,", "),cn=s($,"EM",{});var Mp=l(cn);Cd=a(Mp,"optional"),Mp.forEach(t),wd=a($,`): A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: os.environ[\u2018MLFLOW_TAGS\u2019]=\u2019{\u201Crelease.candidate\u201D: \u201CRC1\u201D, \u201Crelease.version\u201D: \u201C2.2.0\u201D}\u2019 MLFLOW_NESTED_RUN (`),dn=s($,"CODE",{});var Sp=l(dn);yd=a(Sp,"str"),Sp.forEach(t),Ad=a($,", "),fn=s($,"EM",{});var Ip=l(fn);Ld=a(Ip,"optional"),Ip.forEach(t),Dd=a($,`): Whether to use MLflow nested runs. If set to `),mn=s($,"CODE",{});var Np=l(mn);xd=a(Np,"True"),Np.forEach(t),Od=a($," or "),hn=s($,"EM",{});var Pp=l(hn);Md=a(Pp,"1"),Pp.forEach(t),Sd=a($,`, will create a nested run inside the current run. MLFLOW_RUN_ID (`),pn=s($,"CODE",{});var Fp=l(pn);Id=a(Fp,"str"),Fp.forEach(t),Nd=a($,", "),gn=s($,"EM",{});var Wp=l(gn);Pd=a(Wp,"optional"),Wp.forEach(t),Fd=a($,`): Allow to reattach to an existing run which can be usefull when resuming training from a checkpoint. When MLFLOW_RUN_ID environment variable is set, start_run attempts to resume a run with the specified run ID and other parameters are ignored. MLFLOW_FLATTEN_PARAMS (`),un=s($,"CODE",{});var jp=l(un);Wd=a(jp,"str"),jp.forEach(t),jd=a($,", "),_n=s($,"EM",{});var zp=l(_n);zd=a(zp,"optional"),zp.forEach(t),Bd=a($,`): Whether to flatten the parameters dictionary before logging. Default to `),bn=s($,"CODE",{});var Bp=l(bn);Rd=a(Bp,"False"),Bp.forEach(t),Vd=a($,"."),$.forEach(t),ga.forEach(t),pa.forEach(t),uo=d(n),me=s(n,"DIV",{class:!0});var qo=l(me);u(Nt.$$.fragment,qo),Ud=d(qo),he=s(qo,"P",{});var ua=l(he);qd=a(ua,"A "),ea=s(ua,"A",{href:!0});var Rp=l(ea);Gd=a(Rp,"TrainerCallback"),Rp.forEach(t),Hd=a(ua," that sends the logs to "),Pt=s(ua,"A",{href:!0,rel:!0});var Vp=l(Pt);Jd=a(Vp,"AzureML"),Vp.forEach(t),Yd=a(ua,"."),ua.forEach(t),qo.forEach(t),_o=d(n),pe=s(n,"DIV",{class:!0});var Go=l(pe);u(Ft.$$.fragment,Go),Xd=d(Go),Wt=s(Go,"P",{});var Ho=l(Wt);Kd=a(Ho,"A "),ta=s(Ho,"A",{href:!0});var Up=l(ta);Qd=a(Up,"TrainerCallback"),Up.forEach(t),Zd=a(Ho," that tracks the CO2 emission of training."),Ho.forEach(t),Go.forEach(t),bo=d(n),ge=s(n,"DIV",{class:!0});var Jo=l(ge);u(jt.$$.fragment,Jo),ef=d(Jo),zt=s(Jo,"P",{});var Yo=l(zt);tf=a(Yo,"TrainerCallback that sends the logs to "),Bt=s(Yo,"A",{href:!0,rel:!0});var qp=l(Bt);rf=a(qp,"Neptune"),qp.forEach(t),af=a(Yo,"."),Yo.forEach(t),Jo.forEach(t),vo=d(n),ue=s(n,"H2",{class:!0});var Xo=l(ue);Oe=s(Xo,"A",{id:!0,class:!0,href:!0});var Gp=l(Oe);vn=s(Gp,"SPAN",{});var Hp=l(vn);u(Rt.$$.fragment,Hp),Hp.forEach(t),Gp.forEach(t),nf=d(Xo),En=s(Xo,"SPAN",{});var Jp=l(En);of=a(Jp,"TrainerCallback"),Jp.forEach(t),Xo.forEach(t),Eo=d(n),k=s(n,"DIV",{class:!0});var C=l(k);u(Vt.$$.fragment,C),sf=d(C),kn=s(C,"P",{});var Yp=l(kn);lf=a(Yp,`A class for objects that will inspect the state of the training loop at some events and take some decisions. 
At each of those events the following arguments are available:`),Yp.forEach(t),cf=d(C),Ut=s(C,"P",{});var Ko=l(Ut);df=a(Ko,"The "),Tn=s(Ko,"CODE",{});var Xp=l(Tn);ff=a(Xp,"control"),Xp.forEach(t),mf=a(Ko,` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),Ko.forEach(t),hf=d(C),N=s(C,"P",{});var q=l(N);pf=a(q,"The argument "),$n=s(q,"CODE",{});var Kp=l($n);gf=a(Kp,"args"),Kp.forEach(t),uf=a(q,", "),Cn=s(q,"CODE",{});var Qp=l(Cn);_f=a(Qp,"state"),Qp.forEach(t),bf=a(q," and "),wn=s(q,"CODE",{});var Zp=l(wn);vf=a(Zp,"control"),Zp.forEach(t),Ef=a(q," are positionals for all events, all the others are grouped in "),yn=s(q,"CODE",{});var eg=l(yn);kf=a(eg,"kwargs"),eg.forEach(t),Tf=a(q,`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple `),An=s(q,"CODE",{});var tg=l(An);$f=a(tg,"~transformer.PrinterCallback"),tg.forEach(t),Cf=a(q,"."),q.forEach(t),wf=d(C),u(Me.$$.fragment,C),yf=d(C),Se=s(C,"DIV",{class:!0});var Qo=l(Se);u(qt.$$.fragment,Qo),Af=d(Qo),Ln=s(Qo,"P",{});var rg=l(Ln);Lf=a(rg,"Event called at the beginning of an epoch."),rg.forEach(t),Qo.forEach(t),Df=d(C),Ie=s(C,"DIV",{class:!0});var Zo=l(Ie);u(Gt.$$.fragment,Zo),xf=d(Zo),Dn=s(Zo,"P",{});var ag=l(Dn);Of=a(ag,"Event called at the end of an epoch."),ag.forEach(t),Zo.forEach(t),Mf=d(C),Ne=s(C,"DIV",{class:!0});var es=l(Ne);u(Ht.$$.fragment,es),Sf=d(es),xn=s(es,"P",{});var ng=l(xn);If=a(ng,"Event called after an evaluation phase."),ng.forEach(t),es.forEach(t),Nf=d(C),Pe=s(C,"DIV",{class:!0});var ts=l(Pe);u(Jt.$$.fragment,ts),Pf=d(ts),Yt=s(ts,"P",{});var rs=l(Yt);Ff=a(rs,"Event called at the end of the initialization of the "),ra=s(rs,"A",{href:!0});var og=l(ra);Wf=a(og,"Trainer"),og.forEach(t),jf=a(rs,"."),rs.forEach(t),ts.forEach(t),zf=d(C),Fe=s(C,"DIV",{class:!0});var as=l(Fe);u(Xt.$$.fragment,as),Bf=d(as),On=s(as,"P",{});var sg=l(On);Rf=a(sg,"Event called after logging the last logs."),sg.forEach(t),as.forEach(t),Vf=d(C),We=s(C,"DIV",{class:!0});var ns=l(We);u(Kt.$$.fragment,ns),Uf=d(ns),Mn=s(ns,"P",{});var lg=l(Mn);qf=a(lg,"Event called after a successful prediction."),lg.forEach(t),ns.forEach(t),Gf=d(C),je=s(C,"DIV",{class:!0});var os=l(je);u(Qt.$$.fragment,os),Hf=d(os),Sn=s(os,"P",{});var ig=l(Sn);Jf=a(ig,"Event called after a prediction step."),ig.forEach(t),os.forEach(t),Yf=d(C),ze=s(C,"DIV",{class:!0});var ss=l(ze);u(Zt.$$.fragment,ss),Xf=d(ss),In=s(ss,"P",{});var cg=l(In);Kf=a(cg,"Event called after a checkpoint save."),cg.forEach(t),ss.forEach(t),Qf=d(C),Be=s(C,"DIV",{class:!0});var ls=l(Be);u(er.$$.fragment,ls),Zf=d(ls),Nn=s(ls,"P",{});var dg=l(Nn);em=a(dg,`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),dg.forEach(t),ls.forEach(t),tm=d(C),Re=s(C,"DIV",{class:!0});var is=l(Re);u(tr.$$.fragment,is),rm=d(is),Pn=s(is,"P",{});var fg=l(Pn);am=a(fg,`Event called at the end of a training step. 
If using gradient accumulation, one training step might take several inputs.`),fg.forEach(t),is.forEach(t),nm=d(C),Ve=s(C,"DIV",{class:!0});var cs=l(Ve);u(rr.$$.fragment,cs),om=d(cs),Fn=s(cs,"P",{});var mg=l(Fn);sm=a(mg,"Event called at the end of an substep during gradient accumulation."),mg.forEach(t),cs.forEach(t),lm=d(C),Ue=s(C,"DIV",{class:!0});var ds=l(Ue);u(ar.$$.fragment,ds),im=d(ds),Wn=s(ds,"P",{});var hg=l(Wn);cm=a(hg,"Event called at the beginning of training."),hg.forEach(t),ds.forEach(t),dm=d(C),qe=s(C,"DIV",{class:!0});var fs=l(qe);u(nr.$$.fragment,fs),fm=d(fs),jn=s(fs,"P",{});var pg=l(jn);mm=a(pg,"Event called at the end of training."),pg.forEach(t),fs.forEach(t),C.forEach(t),ko=d(n),Ge=s(n,"P",{});var ms=l(Ge);hm=a(ms,"Here is an example of how to register a custom callback with the PyTorch "),aa=s(ms,"A",{href:!0});var gg=l(aa);pm=a(gg,"Trainer"),gg.forEach(t),gm=a(ms,":"),ms.forEach(t),To=d(n),u(or.$$.fragment,n),$o=d(n),He=s(n,"P",{});var hs=l(He);um=a(hs,"Another way to register a callback is to call "),zn=s(hs,"CODE",{});var ug=l(zn);_m=a(ug,"trainer.add_callback()"),ug.forEach(t),bm=a(hs," as follows:"),hs.forEach(t),Co=d(n),u(sr.$$.fragment,n),wo=d(n),_e=s(n,"H2",{class:!0});var ps=l(_e);Je=s(ps,"A",{id:!0,class:!0,href:!0});var _g=l(Je);Bn=s(_g,"SPAN",{});var bg=l(Bn);u(lr.$$.fragment,bg),bg.forEach(t),_g.forEach(t),vm=d(ps),Rn=s(ps,"SPAN",{});var vg=l(Rn);Em=a(vg,"TrainerState"),vg.forEach(t),ps.forEach(t),yo=d(n),P=s(n,"DIV",{class:!0});var te=l(P);u(ir.$$.fragment,te),km=d(te),be=s(te,"P",{});var _a=l(be);Tm=a(_a,"A class containing the "),na=s(_a,"A",{href:!0});var Eg=l(na);$m=a(Eg,"Trainer"),Eg.forEach(t),Cm=a(_a,` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),oa=s(_a,"A",{href:!0});var kg=l(oa);wm=a(kg,"TrainerCallback"),kg.forEach(t),ym=a(_a,"."),_a.forEach(t),Am=d(te),u(Ye.$$.fragment,te),Lm=d(te),Xe=s(te,"DIV",{class:!0});var gs=l(Xe);u(cr.$$.fragment,gs),Dm=d(gs),dr=s(gs,"P",{});var us=l(dr);xm=a(us,"Create an instance from the content of "),Vn=s(us,"CODE",{});var Tg=l(Vn);Om=a(Tg,"json_path"),Tg.forEach(t),Mm=a(us,"."),us.forEach(t),gs.forEach(t),Sm=d(te),Ke=s(te,"DIV",{class:!0});var _s=l(Ke);u(fr.$$.fragment,_s),Im=d(_s),mr=s(_s,"P",{});var bs=l(mr);Nm=a(bs,"Save the content of this instance in JSON format inside "),Un=s(bs,"CODE",{});var $g=l(Un);Pm=a($g,"json_path"),$g.forEach(t),Fm=a(bs,"."),bs.forEach(t),_s.forEach(t),te.forEach(t),Ao=d(n),ve=s(n,"H2",{class:!0});var vs=l(ve);Qe=s(vs,"A",{id:!0,class:!0,href:!0});var Cg=l(Qe);qn=s(Cg,"SPAN",{});var wg=l(qn);u(hr.$$.fragment,wg),wg.forEach(t),Cg.forEach(t),Wm=d(vs),Gn=s(vs,"SPAN",{});var yg=l(Gn);jm=a(yg,"TrainerControl"),yg.forEach(t),vs.forEach(t),Lo=d(n),Ee=s(n,"DIV",{class:!0});var Es=l(Ee);u(pr.$$.fragment,Es),zm=d(Es),ke=s(Es,"P",{});var ba=l(ke);Bm=a(ba,"A class that handles the "),sa=s(ba,"A",{href:!0});var Ag=l(sa);Rm=a(Ag,"Trainer"),Ag.forEach(t),Vm=a(ba," control flow. 
This class is used by the "),la=s(ba,"A",{href:!0});var Lg=l(la);Um=a(Lg,"TrainerCallback"),Lg.forEach(t),qm=a(ba,` to activate some switches in the training loop.`),ba.forEach(t),Es.forEach(t),this.h()},h(){i(A,"name","hf:doc:metadata"),i(A,"content",JSON.stringify(zg)),i(O,"id","callbacks"),i(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(O,"href","#callbacks"),i(x,"class","relative group"),i(vr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(Er,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl"),i(kr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(Tr,"href","trainer"),i($r,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(wr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.DefaultFlowCallback"),i(yr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.PrinterCallback"),i(Ar,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.ProgressCallback"),i(Lr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),i(xr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.TensorBoardCallback"),i(Or,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.WandbCallback"),i(nt,"href","https://www.wandb.com/"),i(nt,"rel","nofollow"),i(Mr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.CometCallback"),i(ot,"href","https://www.comet.ml/site/"),i(ot,"rel","nofollow"),i(Sr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.MLflowCallback"),i(st,"href","https://www.mlflow.org/"),i(st,"rel","nofollow"),i(Ir,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.NeptuneCallback"),i(lt,"href","https://neptune.ai/"),i(lt,"rel","nofollow"),i(Nr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.AzureMLCallback"),i(it,"href","https://pypi.org/project/azureml-sdk/"),i(it,"rel","nofollow"),i(Pr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.CodeCarbonCallback"),i(ct,"href","https://pypi.org/project/codecarbon/"),i(ct,"rel","nofollow"),i(Fr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Wr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),i(jr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(zr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState"),i(Br,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl"),i(De,"id","transformers.integrations.CometCallback"),i(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(De,"href","#transformers.integrations.CometCallback"),i(ne,"class","relative 
group"),i(Rr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Vr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(mt,"href","https://www.comet.ml/site/"),i(mt,"rel","nofollow"),i(gt,"href","https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables"),i(gt,"rel","nofollow"),i(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ur,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(qr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Gr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Hr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Jr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),i(Yr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState"),i(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Xr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(wt,"href","https://www.tensorflow.org/tensorboard"),i(wt,"rel","nofollow"),i(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Kr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(At,"href","https://www.wandb.com/"),i(At,"rel","nofollow"),i(Ot,"href","https://docs.wandb.ai/integrations/huggingface"),i(Ot,"rel","nofollow"),i(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Qr,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(St,"href","https://www.mlflow.org/"),i(St,"rel","nofollow"),i(Zr,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),i(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(ea,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Pt,"href","https://pypi.org/project/azureml-sdk/"),i(Pt,"rel","nofollow"),i(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(ta,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Bt,"href","https://neptune.ai"),i(Bt,"rel","nofollow"),i(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Oe,"id","transformers.TrainerCallback"),i(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),i(Oe,"href","#transformers.TrainerCallback"),i(ue,"class","relative group"),i(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(ra,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(Pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(We,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(aa,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(Je,"id","transformers.TrainerState"),i(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Je,"href","#transformers.TrainerState"),i(_e,"class","relative group"),i(na,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(oa,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),i(Qe,"id","transformers.TrainerControl"),i(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Qe,"href","#transformers.TrainerControl"),i(ve,"class","relative group"),i(sa,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),i(la,"href","/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback"),i(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(n,f){e(document.head,A),m(n,z,f),m(n,x,f),e(x,O),e(O,F),_(y,F,null),e(x,W),e(x,re),e(re,ae),m(n,j,f),m(n,G,f),e(G,br),e(G,vr),e(vr,ks),e(G,Ts),m(n,to,f),m(n,B,f),e(B,$s),e(B,Er),e(Er,Cs),e(B,ws),e(B,kr),e(kr,ys),e(B,As),e(B,Tr),e(Tr,Ls),e(B,Ds),m(n,ro,f),m(n,Te,f),e(Te,xs),e(Te,$r),e($r,Os),e(Te,Ms),m(n,ao,f),m(n,D,f),e(D,Cr),e(Cr,wr),e(wr,Ss),e(Cr,Is),e(D,Ns),e(D,Z),e(Z,yr),e(yr,Ps),e(Z,Fs),e(Z,Ar),e(Ar,Ws),e(Z,js),e(Z,Lr),e(Lr,zs),e(Z,Bs),e(D,Rs),e(D,Dr),e(Dr,xr),e(xr,Vs),e(Dr,Us),e(D,qs),e(D,$e),e($e,Or),e(Or,Gs),e($e,Hs),e($e,nt),e(nt,Js),e($e,Ys),e(D,Xs),e(D,Ce),e(Ce,Mr),e(Mr,Ks),e(Ce,Qs),e(Ce,ot),e(ot,Zs),e(Ce,el),e(D,tl),e(D,we),e(we,Sr),e(Sr,rl),e(we,al),e(we,st),e(st,nl),e(we,ol),e(D,sl),e(D,ye),e(ye,Ir),e(Ir,ll),e(ye,il),e(ye,lt),e(lt,cl),e(ye,dl),e(D,fl),e(D,Ae),e(Ae,Nr),e(Nr,ml),e(Ae,hl),e(Ae,it),e(it,pl),e(Ae,gl),e(D,ul),e(D,Le),e(Le,Pr),e(Pr,_l),e(Le,bl),e(Le,ct),e(ct,vl),e(Le,El),m(n,no,f),m(n,I,f),e(I,kl),e(I,Fr),e(Fr,Tl),e(I,$l),e(I,Wr),e(Wr,Cl),e(I,wl),e(I,jr),e(jr,yl),e(I,Al),e(I,zr),e(zr,Ll),e(I,Dl),e(I,Br),e(Br,xl),e(I,Ol),m(n,oo,f),m(n,ne,f),e(ne,De),e(De,va),_(dt,va,null),e(ne,Ml),e(ne,Ea),e(Ea,Sl),m(n,so,f),m(n,xe,f),e(xe,Il),e(xe,Rr),e(Rr,Nl),e(xe,Pl),m(n,lo,f),m(n,H,f),_(ft,H,null),e(H,Fl),e(H,oe),e(oe,Wl),e(oe,Vr),e(Vr,jl),e(oe,zl),e(oe,mt),e(mt,Bl),e(oe,Rl),e(H,Vl),e(H,R),_(ht,R,null),e(R,Ul),e(R,ka),e(ka,ql),e(R,Gl),e(R,L),e(L,Hl),e(L,Ta),e(Ta,Jl),e(L,Yl),e(L,$a),e($a,Xl),e(L,Kl),e(L,Ca),e(Ca,Ql),e(L,Zl),e(L,wa),e(wa,ei),e(L,ti),e(L,ya),e(ya,ri),e(L,ai),e(L,Aa),e(Aa,ni),e(L,oi),e(L,La),e(La,si),e(L,li),e(L,Da),e(Da,ii),e(L,ci),e(L,xa),e(xa,di),e(L,fi),e(R,mi),e(R,pt),e(pt,hi),e(pt,gt),e(gt,pi),e(pt,gi),m(n,io,f),m(n,se,f),_(ut,se,null),e(se,ui),e(se,_t),e(_t,_i),e(_t,Ur),e(Ur,bi),e(_t,vi),m(n,co,f),m(n,le,f),_(bt,le,null),e(le,Ei),e(le,vt),e(vt,ki),e(vt,qr),e(qr,Ti),e(vt,$i),m(n,fo,f),m(n,ie,f),_(Et,ie,null),e(ie,Ci),e(ie,kt),e(kt,wi),e(kt,Gr),e(Gr,yi),e(kt,Ai),m(n,mo,f),m(n,J,f),_(Tt,J,null),e(J,Li),e(J,$t),e($t,Di),e($t,Hr),e(Hr,xi),e($t,Oi),e(J,Mi),e(J,Y),e(Y,Si),e(Y,Jr),e(Jr,Ii),e(Y,Ni),e(Y,Oa),e(Oa,Pi),e(Y,Fi),e(Y,Yr),e(Yr,Wi),e(Y,ji),m(n,ho,f),m(n,ce,f),_(Ct,ce,null),e(ce,zi),e(ce,de),e(de,Bi),e(de,Xr),e(Xr,Ri),e(de,Vi),e(de,wt),e(wt,Ui),e(de,qi),m(n,po,f),m(n,X,f),_(yt,X,null),e(X,Gi),e(X,fe),e(fe,Hi),e(fe,Kr),e(Kr,Ji),e(fe,Yi),e(fe,At),e(At,Xi),e(fe,Ki),e(X,Qi),e(X,V),_(Lt,V,null),e(V,Zi),e(V,Dt),e(Dt,ec),e(Dt,Ma),e(Ma,tc),e(Dt,rc),e(V,ac),e(V,xt),e(xt,nc),e(xt,Ot),e(Ot,oc),e(xt,sc),e(V,lc),e(V,h),e(h,ic),e(h,Sa),e(Sa,cc),e(h,dc),e(h,Ia),e(Ia,fc),e(h,mc),e(h,Na),e(Na,hc),e(h,pc),e(h,Pa),e(Pa,gc),e(h,uc),e(h,Fa),e(Fa,_c),e(h,bc),e(h,Wa),e(Wa,vc),e(h,Ec),e(h,ja),e(ja,kc),e(h,Tc),e(h,za),e(za,$c),e(h,Cc),e(h,Ba),e(Ba,wc),e(h,yc),e(h,Ra),e(Ra,Ac),e(h,Lc),e(h,Va),e(Va,Dc),e(h,xc),e(h,Ua),e(Ua,Oc),e(h,Mc),e(h,qa),e(qa,Sc),e(h,Ic),e(h,Ga),e(Ga,Nc),e(h,Pc),e(h,Ha),e(Ha,Fc),e(h,Wc),e(h,Ja),e(Ja,jc),e(h,zc),e(h,Ya),e(Ya,Bc),e(h,Rc),e(h,Xa),e(Xa,Vc),e(h,Uc),e(h,Ka),e(Ka,qc),e(h,Gc),m(n,go,f),m(n,K,f),_(Mt,K,null),e(K,Hc),e(K,Q),e(Q,Jc),e(Q,Qr),e(Qr,Yc),e(Q,Xc),e(Q,St),e(St,Kc),e(Q,Qc),e(Q,Qa),e(Qa,Zc),e(Q,ed),e(K,td),e(K,ee),_(It,ee,null),e(ee,rd),e(ee,Za),e(Za,ad),e(ee,nd),e(ee,p),e(p,od),e(p,en),e(en,sd),e(p,ld),e(p,tn),e(tn,id),e(p,cd),e(p,rn),e(rn,dd),e(p,fd),e(p,an),e(an,md),e(p,hd),e(p,Zr),e(Zr,pd),e(p,gd),e(p,nn),e(nn,ud),e(p,_d),e(p,on),e(on,bd),e(p,vd),e(p,sn),e(sn,Ed),e(p,kd),e(p,ln),e(ln,Td),e(p,$d),e(p,cn),e(cn,Cd),e(p,wd),e(p,dn),e(dn,yd),e(p,Ad),e(p,fn),e(fn,Ld),e(p,Dd),e(p,mn),e(mn,xd),e(p,Od),e(p,hn),e(hn,Md),e(p,Sd),e(p,pn),e(pn,Id),e(p,N
d),e(p,gn),e(gn,Pd),e(p,Fd),e(p,un),e(un,Wd),e(p,jd),e(p,_n),e(_n,zd),e(p,Bd),e(p,bn),e(bn,Rd),e(p,Vd),m(n,uo,f),m(n,me,f),_(Nt,me,null),e(me,Ud),e(me,he),e(he,qd),e(he,ea),e(ea,Gd),e(he,Hd),e(he,Pt),e(Pt,Jd),e(he,Yd),m(n,_o,f),m(n,pe,f),_(Ft,pe,null),e(pe,Xd),e(pe,Wt),e(Wt,Kd),e(Wt,ta),e(ta,Qd),e(Wt,Zd),m(n,bo,f),m(n,ge,f),_(jt,ge,null),e(ge,ef),e(ge,zt),e(zt,tf),e(zt,Bt),e(Bt,rf),e(zt,af),m(n,vo,f),m(n,ue,f),e(ue,Oe),e(Oe,vn),_(Rt,vn,null),e(ue,nf),e(ue,En),e(En,of),m(n,Eo,f),m(n,k,f),_(Vt,k,null),e(k,sf),e(k,kn),e(kn,lf),e(k,cf),e(k,Ut),e(Ut,df),e(Ut,Tn),e(Tn,ff),e(Ut,mf),e(k,hf),e(k,N),e(N,pf),e(N,$n),e($n,gf),e(N,uf),e(N,Cn),e(Cn,_f),e(N,bf),e(N,wn),e(wn,vf),e(N,Ef),e(N,yn),e(yn,kf),e(N,Tf),e(N,An),e(An,$f),e(N,Cf),e(k,wf),_(Me,k,null),e(k,yf),e(k,Se),_(qt,Se,null),e(Se,Af),e(Se,Ln),e(Ln,Lf),e(k,Df),e(k,Ie),_(Gt,Ie,null),e(Ie,xf),e(Ie,Dn),e(Dn,Of),e(k,Mf),e(k,Ne),_(Ht,Ne,null),e(Ne,Sf),e(Ne,xn),e(xn,If),e(k,Nf),e(k,Pe),_(Jt,Pe,null),e(Pe,Pf),e(Pe,Yt),e(Yt,Ff),e(Yt,ra),e(ra,Wf),e(Yt,jf),e(k,zf),e(k,Fe),_(Xt,Fe,null),e(Fe,Bf),e(Fe,On),e(On,Rf),e(k,Vf),e(k,We),_(Kt,We,null),e(We,Uf),e(We,Mn),e(Mn,qf),e(k,Gf),e(k,je),_(Qt,je,null),e(je,Hf),e(je,Sn),e(Sn,Jf),e(k,Yf),e(k,ze),_(Zt,ze,null),e(ze,Xf),e(ze,In),e(In,Kf),e(k,Qf),e(k,Be),_(er,Be,null),e(Be,Zf),e(Be,Nn),e(Nn,em),e(k,tm),e(k,Re),_(tr,Re,null),e(Re,rm),e(Re,Pn),e(Pn,am),e(k,nm),e(k,Ve),_(rr,Ve,null),e(Ve,om),e(Ve,Fn),e(Fn,sm),e(k,lm),e(k,Ue),_(ar,Ue,null),e(Ue,im),e(Ue,Wn),e(Wn,cm),e(k,dm),e(k,qe),_(nr,qe,null),e(qe,fm),e(qe,jn),e(jn,mm),m(n,ko,f),m(n,Ge,f),e(Ge,hm),e(Ge,aa),e(aa,pm),e(Ge,gm),m(n,To,f),_(or,n,f),m(n,$o,f),m(n,He,f),e(He,um),e(He,zn),e(zn,_m),e(He,bm),m(n,Co,f),_(sr,n,f),m(n,wo,f),m(n,_e,f),e(_e,Je),e(Je,Bn),_(lr,Bn,null),e(_e,vm),e(_e,Rn),e(Rn,Em),m(n,yo,f),m(n,P,f),_(ir,P,null),e(P,km),e(P,be),e(be,Tm),e(be,na),e(na,$m),e(be,Cm),e(be,oa),e(oa,wm),e(be,ym),e(P,Am),_(Ye,P,null),e(P,Lm),e(P,Xe),_(cr,Xe,null),e(Xe,Dm),e(Xe,dr),e(dr,xm),e(dr,Vn),e(Vn,Om),e(dr,Mm),e(P,Sm),e(P,Ke),_(fr,Ke,null),e(Ke,Im),e(Ke,mr),e(mr,Nm),e(mr,Un),e(Un,Pm),e(mr,Fm),m(n,Ao,f),m(n,ve,f),e(ve,Qe),e(Qe,qn),_(hr,qn,null),e(ve,Wm),e(ve,Gn),e(Gn,jm),m(n,Lo,f),m(n,Ee,f),_(pr,Ee,null),e(Ee,zm),e(Ee,ke),e(ke,Bm),e(ke,sa),e(sa,Rm),e(ke,Vm),e(ke,la),e(la,Um),e(ke,qm),Do=!0},p(n,[f]){const gr={};f&2&&(gr.$$scope={dirty:f,ctx:n}),Me.$set(gr);const 
Hn={};f&2&&(Hn.$$scope={dirty:f,ctx:n}),Ye.$set(Hn)},i(n){Do||(b(y.$$.fragment,n),b(dt.$$.fragment,n),b(ft.$$.fragment,n),b(ht.$$.fragment,n),b(ut.$$.fragment,n),b(bt.$$.fragment,n),b(Et.$$.fragment,n),b(Tt.$$.fragment,n),b(Ct.$$.fragment,n),b(yt.$$.fragment,n),b(Lt.$$.fragment,n),b(Mt.$$.fragment,n),b(It.$$.fragment,n),b(Nt.$$.fragment,n),b(Ft.$$.fragment,n),b(jt.$$.fragment,n),b(Rt.$$.fragment,n),b(Vt.$$.fragment,n),b(Me.$$.fragment,n),b(qt.$$.fragment,n),b(Gt.$$.fragment,n),b(Ht.$$.fragment,n),b(Jt.$$.fragment,n),b(Xt.$$.fragment,n),b(Kt.$$.fragment,n),b(Qt.$$.fragment,n),b(Zt.$$.fragment,n),b(er.$$.fragment,n),b(tr.$$.fragment,n),b(rr.$$.fragment,n),b(ar.$$.fragment,n),b(nr.$$.fragment,n),b(or.$$.fragment,n),b(sr.$$.fragment,n),b(lr.$$.fragment,n),b(ir.$$.fragment,n),b(Ye.$$.fragment,n),b(cr.$$.fragment,n),b(fr.$$.fragment,n),b(hr.$$.fragment,n),b(pr.$$.fragment,n),Do=!0)},o(n){v(y.$$.fragment,n),v(dt.$$.fragment,n),v(ft.$$.fragment,n),v(ht.$$.fragment,n),v(ut.$$.fragment,n),v(bt.$$.fragment,n),v(Et.$$.fragment,n),v(Tt.$$.fragment,n),v(Ct.$$.fragment,n),v(yt.$$.fragment,n),v(Lt.$$.fragment,n),v(Mt.$$.fragment,n),v(It.$$.fragment,n),v(Nt.$$.fragment,n),v(Ft.$$.fragment,n),v(jt.$$.fragment,n),v(Rt.$$.fragment,n),v(Vt.$$.fragment,n),v(Me.$$.fragment,n),v(qt.$$.fragment,n),v(Gt.$$.fragment,n),v(Ht.$$.fragment,n),v(Jt.$$.fragment,n),v(Xt.$$.fragment,n),v(Kt.$$.fragment,n),v(Qt.$$.fragment,n),v(Zt.$$.fragment,n),v(er.$$.fragment,n),v(tr.$$.fragment,n),v(rr.$$.fragment,n),v(ar.$$.fragment,n),v(nr.$$.fragment,n),v(or.$$.fragment,n),v(sr.$$.fragment,n),v(lr.$$.fragment,n),v(ir.$$.fragment,n),v(Ye.$$.fragment,n),v(cr.$$.fragment,n),v(fr.$$.fragment,n),v(hr.$$.fragment,n),v(pr.$$.fragment,n),Do=!1},d(n){t(A),n&&t(z),n&&t(x),E(y),n&&t(j),n&&t(G),n&&t(to),n&&t(B),n&&t(ro),n&&t(Te),n&&t(ao),n&&t(D),n&&t(no),n&&t(I),n&&t(oo),n&&t(ne),E(dt),n&&t(so),n&&t(xe),n&&t(lo),n&&t(H),E(ft),E(ht),n&&t(io),n&&t(se),E(ut),n&&t(co),n&&t(le),E(bt),n&&t(fo),n&&t(ie),E(Et),n&&t(mo),n&&t(J),E(Tt),n&&t(ho),n&&t(ce),E(Ct),n&&t(po),n&&t(X),E(yt),E(Lt),n&&t(go),n&&t(K),E(Mt),E(It),n&&t(uo),n&&t(me),E(Nt),n&&t(_o),n&&t(pe),E(Ft),n&&t(bo),n&&t(ge),E(jt),n&&t(vo),n&&t(ue),E(Rt),n&&t(Eo),n&&t(k),E(Vt),E(Me),E(qt),E(Gt),E(Ht),E(Jt),E(Xt),E(Kt),E(Qt),E(Zt),E(er),E(tr),E(rr),E(ar),E(nr),n&&t(ko),n&&t(Ge),n&&t(To),E(or,n),n&&t($o),n&&t(He),n&&t(Co),E(sr,n),n&&t(wo),n&&t(_e),E(lr),n&&t(yo),n&&t(P),E(ir),E(Ye),E(cr),E(fr),n&&t(Ao),n&&t(ve),E(hr),n&&t(Lo),n&&t(Ee),E(pr)}}}const zg={local:"callbacks",sections:[{local:"transformers.integrations.CometCallback",title:"Available Callbacks"},{local:"transformers.TrainerCallback",title:"TrainerCallback"},{local:"transformers.TrainerState",title:"TrainerState"},{local:"transformers.TrainerControl",title:"TrainerControl"}],title:"Callbacks"};function Bg(_r){return Sg(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Jg extends Dg{constructor(A){super();xg(this,A,Bg,jg,Og,{})}}export{Jg as default,zg as metadata};
11
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/text_generation.mdx-hf-doc-builder.js
import{S as Qp,i as Yp,s as em,e as a,k as m,w as k,t,M as tm,c as r,d as o,m as g,a as i,x as v,h as n,b as h,G as e,g as x,y,q as j,o as M,B as w,v as nm,L as _e}from"../../chunks/vendor-hf-doc-builder.js";import{T as Jp}from"../../chunks/Tip-hf-doc-builder.js";import{D as ce}from"../../chunks/Docstring-hf-doc-builder.js";import{C as ue}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Cs}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as ge}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function om(L){let d,f,_,c,u,s,p,C,q,I,S;return{c(){d=a("p"),f=t("Apart from "),_=a("code"),c=t("inputs"),u=t(`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),s=a("code"),p=t("config.json"),C=t(`) which in turn defaults to the `),q=a("a"),I=t("PretrainedConfig"),S=t(" of the model."),this.h()},l(z){d=r(z,"P",{});var $=i(d);f=n($,"Apart from "),_=r($,"CODE",{});var we=i(_);c=n(we,"inputs"),we.forEach(o),u=n($,`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),s=r($,"CODE",{});var Le=i(s);p=n(Le,"config.json"),Le.forEach(o),C=n($,`) which in turn defaults to the `),q=r($,"A",{href:!0});var he=i(q);I=n(he,"PretrainedConfig"),he.forEach(o),S=n($," of the model."),$.forEach(o),this.h()},h(){h(q,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig")},m(z,$){x(z,d,$),e(d,f),e(d,_),e(_,c),e(d,u),e(d,s),e(s,p),e(d,C),e(d,q),e(q,I),e(d,S)},d(z){z&&o(d)}}}function sm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") prompt = "Today I believe we can finally" input_ids = tokenizer(prompt, return_tensors="pt").input_ids # generate up to 30 tokens outputs = model.generate(input_ids, do_sample=False, max_length=30) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">False</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\\n&#x27;</span>]`}}),{c(){d=a("p"),f=t("Greedy Decoding:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Greedy 
Decoding:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function am(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") prompt = "Today I believe we can finally" input_ids = tokenizer(prompt, return_tensors="pt").input_ids # sample up to 30 tokens torch.manual_seed(0) outputs = model.generate(input_ids, do_sample=True, max_length=30) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get rid of discrimination,&quot; said Rep. Mark Pocan (D-Wis.).\\n\\n&quot;Just look at the&#x27;</span>]`}}),{c(){d=a("p"),f=t("Multinomial Sampling:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Multinomial Sampling:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function rm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") sentence = "Paris is one of the densest populated areas in Europe." 
input_ids = tokenizer(sentence, return_tensors="pt").input_ids outputs = model.generate(input_ids, num_beams=5) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sentence = <span class="hljs-string">&quot;Paris is one of the densest populated areas in Europe.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, num_beams=<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Paris ist eines der dichtesten besiedelten Gebiete Europas.&#x27;</span>]`}}),{c(){d=a("p"),f=t("Beam-search decoding:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Beam-search decoding:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function im(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForCausalLM, LogitsProcessorList, MinLengthLogitsProcessor, StoppingCriteriaList, MaxLengthCriteria, ) tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") # set pad_token_id to eos_token_id because GPT2 does not have a PAD token model.config.pad_token_id = model.config.eos_token_id input_prompt = "It might be possible to" input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id), ] ) stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) outputs = model.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a PAD token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;It might be possible to&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">10</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.greedy_search( <span class="hljs-meta">... </span> input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&quot;It might be possible to get a better understanding of the nature of the problem, but it&#x27;s not&quot;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function lm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForCausalLM, LogitsProcessorList, MinLengthLogitsProcessor, TopKLogitsWarper, TemperatureLogitsWarper, StoppingCriteriaList, MaxLengthCriteria, ) import torch tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") # set pad_token_id to eos_token_id because GPT2 does not have a EOS token model.config.pad_token_id = model.config.eos_token_id input_prompt = "Today is a beautiful day, and" input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ] ) # instantiate logits processors logits_warper = LogitsProcessorList( [ TopKLogitsWarper(50), TemperatureLogitsWarper(0.7), ] ) stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) torch.manual_seed(0) outputs = model.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;Today is a beautiful day, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">15</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.sample( <span class="hljs-meta">... </span> input_ids, <span class="hljs-meta">... </span> logits_processor=logits_processor, <span class="hljs-meta">... </span> logits_warper=logits_warper, <span class="hljs-meta">... </span> stopping_criteria=stopping_criteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today is a beautiful day, and a wonderful day.\\n\\nI was lucky enough to meet the&#x27;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function dm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function cm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, TopKLogitsWarper, TemperatureLogitsWarper, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, max_length=model.config.max_length, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ) # instantiate logits processors logits_warper = LogitsProcessorList( [ TopKLogitsWarper(50), TemperatureLogitsWarper(0.7), ] ) outputs = model.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id)] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_sample( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function pm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, HammingDiversityLogitsProcessor, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run diverse beam search using 6 beams num_beams = 6 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, max_length=model.config.max_length, num_beams=num_beams, device=model.device, num_beam_groups=3, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run diverse beam search using 6 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">6</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span> num_beam_groups=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor(<span class="hljs-number">5.5</span>, num_beams=<span class="hljs-number">6</span>, num_beam_groups=<span class="hljs-number">3</span>), <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.group_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function mm(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, ConstrainedBeamSearchScorer, PhrasalConstraint, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "Sie" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> ConstrainedBeamSearchScorer, <span class="hljs-meta">... </span> PhrasalConstraint, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_str = <span class="hljs-string">&quot;Sie&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_token_ids = tokenizer.encode(constraint_str)[:-<span class="hljs-number">1</span>] <span class="hljs-comment"># slice to remove eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = ConstrainedBeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, num_beams=num_beams, device=model.device, constraints=constraints <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.constrained_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt sind Sie?&#x27;</span>]`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function gm(L){let d,f,_,c,u;return c=new ue({props:{code:`tokenizer = AutoTokenizer.from_pretrained("distilgpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "distilgpt2" ) # Download model and configuration from huggingface.co and cache. outputs = model.generate(max_length=40) # do greedy decoding print(f"Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("openai-gpt") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "openai-gpt" ) # Download model and configuration from huggingface.co and cache. input_context = "The dog" input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5 ) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print(f"Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("distilgpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "distilgpt2" ) # Download model and configuration from huggingface.co and cache. input_context = "The dog" input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True ) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print(f"Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("ctrl") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "ctrl" ) # Download model and configuration from huggingface.co and cache. input_context = "Legal My neighbor is" # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2 ) # generate sequences print(f"Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("gpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "gpt2" ) # Download model and configuration from huggingface.co and cache. 
input_context = "My cute dog" bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ["idiot", "stupid", "shut up"] ] input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids ) # generate sequences without allowing bad_words to be generated`,highlighted:`tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> outputs = model.generate(max_length=<span class="hljs-number">40</span>) <span class="hljs-comment"># do greedy decoding</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;openai-gpt&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;openai-gpt&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, num_beams=<span class="hljs-number">5</span>, num_return_sequences=<span class="hljs-number">3</span>, temperature=<span class="hljs-number">1.5</span> ) <span class="hljs-comment"># generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context &#x27;The dog&#x27;</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">40</span>, temperature=<span class="hljs-number">0.7</span>, num_return_sequences=<span class="hljs-number">3</span>, do_sample=<span class="hljs-literal">True</span> ) <span class="hljs-comment"># generate 3 candidates using sampling</span> <span 
class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;ctrl&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;ctrl&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;Legal My neighbor is&quot;</span> <span class="hljs-comment"># &quot;Legal&quot; is one of the control codes for ctrl</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">50</span>, temperature=<span class="hljs-number">0.7</span>, repetition_penalty=<span class="hljs-number">1.2</span> ) <span class="hljs-comment"># generate sequences</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;gpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;My cute dog&quot;</span> bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> bad_word <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;idiot&quot;</span>, <span class="hljs-string">&quot;stupid&quot;</span>, <span class="hljs-string">&quot;shut up&quot;</span>] ] input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">100</span>, do_sample=<span class="hljs-literal">True</span>, bad_words_ids=bad_words_ids ) <span class="hljs-comment"># generate sequences without allowing bad_words to be generated</span>`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function _m(L){let d,f,_,c,u,s,p,C,q,I,S;return{c(){d=a("p"),f=t("Apart from "),_=a("code"),c=t("inputs"),u=t(`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),s=a("code"),p=t("config.json"),C=t(`) which in turn defaults to the `),q=a("a"),I=t("PretrainedConfig"),S=t(" of the 
model."),this.h()},l(z){d=r(z,"P",{});var $=i(d);f=n($,"Apart from "),_=r($,"CODE",{});var we=i(_);c=n(we,"inputs"),we.forEach(o),u=n($,`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),s=r($,"CODE",{});var Le=i(s);p=n(Le,"config.json"),Le.forEach(o),C=n($,`) which in turn defaults to the `),q=r($,"A",{href:!0});var he=i(q);I=n(he,"PretrainedConfig"),he.forEach(o),S=n($," of the model."),$.forEach(o),this.h()},h(){h(q,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig")},m(z,$){x(z,d,$),e(d,f),e(d,_),e(_,c),e(d,u),e(d,s),e(s,p),e(d,C),e(d,q),e(q,I),e(d,S)},d(z){z&&o(d)}}}function um(L){let d,f,_,c,u;return c=new ue({props:{code:`from transformers import AutoTokenizer, FlaxAutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = FlaxAutoModelForCausalLM.from_pretrained("distilgpt2") input_context = "The dog" # encode input context input_ids = tokenizer(input_context, return_tensors="np").input_ids # generate candidates using sampling outputs = model.generate(input_ids=input_ids, max_length=20, top_k=30, do_sample=True) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_context = <span class="hljs-string">&quot;The dog&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># encode input context</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_context, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate candidates using sampling</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids=input_ids, max_length=<span class="hljs-number">20</span>, top_k=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),{c(){d=a("p"),f=t("Examples:"),_=m(),k(c.$$.fragment)},l(s){d=r(s,"P",{});var p=i(d);f=n(p,"Examples:"),p.forEach(o),_=g(s),v(c.$$.fragment,s)},m(s,p){x(s,d,p),e(d,f),x(s,_,p),y(c,s,p),u=!0},p:_e,i(s){u||(j(c.$$.fragment,s),u=!0)},o(s){M(c.$$.fragment,s),u=!1},d(s){s&&o(d),s&&o(_),w(c,s)}}}function hm(L){let 
d,f,_,c,u,s,p,C,q,I,S,z,$,we,Le,he,fe,$e,Is,tn,Bs,Ws,nn,Hs,Rs,Us,Te,Vs,on,Ks,Zs,sn,Xs,Js,Qs,Ee,Ys,an,ea,ta,rn,na,oa,us,Oe,nt,zn,xt,sa,Pn,aa,hs,T,kt,ra,vt,ia,ln,la,da,ca,yt,pa,dn,ma,ga,_a,P,B,Dn,ua,ha,cn,fa,ba,Nn,xa,ka,Cn,va,ya,ja,W,In,Ma,wa,pn,La,$a,Bn,Ta,Ea,Wn,Oa,Ga,qa,H,Hn,Sa,Fa,mn,Aa,za,Rn,Pa,Da,Un,Na,Ca,Ia,R,Vn,Ba,Wa,gn,Ha,Ra,Kn,Ua,Va,Zn,Ka,Za,Xa,U,Xn,Ja,Qa,_n,Ya,er,Jn,tr,nr,Qn,or,sr,ar,V,Yn,rr,ir,un,lr,dr,eo,cr,pr,to,mr,gr,_r,E,jt,ur,no,hr,fr,D,K,oo,br,xr,hn,kr,vr,so,yr,jr,ao,Mr,wr,Lr,Z,ro,$r,Tr,fn,Er,Or,io,Gr,qr,lo,Sr,Fr,Ar,X,co,zr,Pr,bn,Dr,Nr,po,Cr,Ir,mo,Br,Wr,Hr,J,go,Rr,Ur,xn,Vr,Kr,_o,Zr,Xr,uo,Jr,Qr,Yr,Q,ho,ei,ti,kn,ni,oi,fo,si,ai,bo,ri,ii,li,Y,xo,di,ci,vn,pi,mi,ko,gi,_i,vo,ui,hi,fi,ot,bi,Mt,xi,wt,ki,vi,yi,yo,ji,Mi,st,wi,at,Li,rt,$i,be,Lt,Ti,$t,Ei,jo,Oi,Gi,qi,it,Si,xe,Tt,Fi,Et,Ai,Mo,zi,Pi,Di,lt,Ni,ke,Ot,Ci,Gt,Ii,wo,Bi,Wi,Hi,dt,Ri,ve,qt,Ui,St,Vi,Lo,Ki,Zi,Xi,ct,Ji,ye,Ft,Qi,At,Yi,$o,el,tl,nl,pt,ol,je,zt,sl,Pt,al,To,rl,il,ll,mt,fs,Ge,gt,Eo,Dt,dl,Oo,cl,bs,pe,Nt,pl,Ct,ml,yn,gl,_l,ul,F,It,hl,Go,fl,bl,Bt,xl,Wt,kl,vl,yl,me,jl,qo,Ml,wl,So,Ll,$l,jn,Tl,El,Ol,Ht,Gl,Rt,ql,Sl,Fl,_t,xs,qe,ut,Fo,Ut,Al,Ao,zl,ks,N,Vt,Pl,Kt,Dl,Mn,Nl,Cl,Il,Zt,Bl,wn,Wl,Hl,Rl,Se,ee,zo,Ul,Vl,Po,Kl,Zl,Do,Xl,Jl,No,Ql,Yl,ed,te,Co,td,nd,Io,od,sd,Bo,ad,rd,Wo,id,ld,dd,ne,Ho,cd,pd,Ro,md,gd,Uo,_d,ud,Vo,hd,fd,bd,A,Xt,xd,Ko,kd,vd,Fe,oe,Zo,yd,jd,Xo,Md,wd,Jo,Ld,$d,Qo,Td,Ed,Od,se,Yo,Gd,qd,es,Sd,Fd,ts,Ad,zd,ns,Pd,Dd,Nd,ae,os,Cd,Id,ss,Bd,Wd,as,Hd,Rd,rs,Ud,Vd,Kd,ht,Zd,Jt,Xd,Qt,Jd,Qd,Yd,ft,vs;return s=new Cs({}),xt=new Cs({}),kt=new ce({props:{name:"class transformers.generation_utils.GenerationMixin",anchor:"transformers.generation_utils.GenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L389"}}),jt=new ce({props:{name:"generate",anchor:"transformers.generation_utils.GenerationMixin.generate",parameters:[{name:"inputs",val:": typing.Optional[torch.Tensor] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"min_length",val:": typing.Optional[int] = None"},{name:"do_sample",val:": typing.Optional[bool] = None"},{name:"early_stopping",val:": typing.Optional[bool] = None"},{name:"num_beams",val:": typing.Optional[int] = None"},{name:"temperature",val:": typing.Optional[float] = None"},{name:"top_k",val:": typing.Optional[int] = None"},{name:"top_p",val:": typing.Optional[float] = None"},{name:"typical_p",val:": typing.Optional[float] = None"},{name:"repetition_penalty",val:": typing.Optional[float] = None"},{name:"bad_words_ids",val:": typing.Optional[typing.Iterable[int]] = None"},{name:"force_words_ids",val:": typing.Union[typing.Iterable[int], typing.Iterable[typing.Iterable[int]], NoneType] = None"},{name:"bos_token_id",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"length_penalty",val:": typing.Optional[float] = None"},{name:"no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"encoder_no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"num_return_sequences",val:": typing.Optional[int] = None"},{name:"max_time",val:": typing.Optional[float] = None"},{name:"max_new_tokens",val:": typing.Optional[int] = None"},{name:"decoder_start_token_id",val:": typing.Optional[int] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"num_beam_groups",val:": typing.Optional[int] = None"},{name:"diversity_penalty",val:": typing.Optional[float] = None"},{name:"prefix_allowed_tokens_fn",val:": 
typing.Union[typing.Callable[[int, torch.Tensor], typing.List[int]], NoneType] = None"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []"},{name:"renormalize_logits",val:": typing.Optional[bool] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []"},{name:"constraints",val:": typing.Optional[typing.List[transformers.generation_beam_constraints.Constraint]] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"remove_invalid_values",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"exponential_decay_length_penalty",val:": typing.Union[typing.Tuple[typing.Union[int, float]], NoneType] = None"},{name:"suppress_tokens",val:": typing.Optional[typing.List[int]] = None"},{name:"begin_suppress_tokens",val:": typing.Optional[typing.List[int]] = None"},{name:"forced_decoder_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.generate.inputs",description:`<strong>inputs</strong> (<code>torch.Tensor</code> of varying shape depending on the modality, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code>, the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should be in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.`,name:"inputs"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>.
In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_new_tokens",description:`<strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum number of tokens to generate, ignoring the number of tokens in the prompt.`,name:"max_new_tokens"},{anchor:"transformers.generation_utils.GenerationMixin.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.min_length</code> or 10 if the config does not set any value) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.generation_utils.GenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.do_sample</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to use sampling; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_utils.GenerationMixin.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.num_beams</code> or 1 if the config does not set any value) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_utils.GenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.temperature</code> or 1.0 if the config does not set any value) &#x2014; The value used to modulate the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_utils.GenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.top_k</code> or 50 if the config does not set any value) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_utils.GenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.top_p</code> or 1.0 if the config does not set any value) &#x2014; If set to float &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.generation_utils.GenerationMixin.generate.typical_p",description:`<strong>typical_p</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.typical_p</code> or 1.0 if the config does not set any value) &#x2014; The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it has no effect.
See <a href="https://arxiv.org/pdf/2202.00666.pdf" rel="nofollow">this paper</a> for more details.`,name:"typical_p"},{anchor:"transformers.generation_utils.GenerationMixin.generate.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.repetition_penalty</code> or 1.0 if the config does not set any value) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.pad_token_id</code>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.bos_token_id</code>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.eos_token_id</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.length_penalty</code> or 1.0 if the config does not set any value) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.`,name:"length_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.no_repeat_ngram_size</code> or 0 if the config does not set any value) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.generation_utils.GenerationMixin.generate.encoder_no_repeat_ngram_size",description:`<strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.encoder_no_repeat_ngram_size</code> or 0 if the config does not set any value) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.`,name:"encoder_no_repeat_ngram_size"},{anchor:"transformers.generation_utils.GenerationMixin.generate.bad_words_ids(List[List[int]],",description:`<strong>bad_words_ids(<code>List[List[int]]</code>,</strong> <em>optional</em>, defaults to <code>model.config.bad_words_ids</code>) &#x2014; List of token ids that are not allowed to be generated. 
In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.`,name:"bad_words_ids(List[List[int]],"},{anchor:"transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]",description:`<strong>force_words_ids(<code>List[List[int]]</code></strong> or <code>List[List[List[int]]]</code>, <em>optional</em>) &#x2014; List of token ids that must be generated. If given a <code>List[List[int]]</code>, this is treated as a simple list of words that must be included, the opposite to <code>bad_words_ids</code>. If given <code>List[List[List[int]]]</code>, this triggers a <a href="https://github.com/huggingface/transformers/issues/14081" rel="nofollow">disjunctive constraint</a>, where one can allow different forms of each word.`,name:"force_words_ids(List[List[int]]"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to <code>model.config.num_return_sequences</code> or 1 if the config does not set any value) &#x2014; The number of independently computed returned sequences for each element in the batch.`,name:"num_return_sequences(int,"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_time(float,",description:`<strong>max_time(<code>float</code>,</strong> <em>optional</em>) &#x2014; The maximum amount of time you allow the computation to run for in seconds. generation will still finish the current pass after allocated time has been passed.`,name:"max_time(float,"},{anchor:"transformers.generation_utils.GenerationMixin.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token. <a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"use_cache"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.num_beam_groups</code> or 1 if the config does not set any value) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
<a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"},{anchor:"transformers.generation_utils.GenerationMixin.generate.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.diversity_penalty</code> or 0.0 if the config does not set any value) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.`,name:"diversity_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.prefix_allowed_tokens_fn",description:`<strong>prefix_allowed_tokens_fn</strong> (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>) &#x2014; If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments: the batch ID <code>batch_id</code> and <code>input_ids</code>. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID <code>batch_id</code> and the previously generated tokens <code>inputs_ids</code>. This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.`,name:"prefix_allowed_tokens_fn"},{anchor:"transformers.generation_utils.GenerationMixin.generate.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. If a logit processor is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. This feature is intended for advanced users. renormalize_logits &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones). It&#x2019;s highly recommended to set this flag to <code>True</code> as the search algorithms suppose the score logits are normalized but some logit processors or warpers break the normalization.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.generate.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. If a stopping criteria is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. 
This feature is intended for advanced users.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.generate.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>, <em>optional</em>) &#x2014; Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by <code>Constraint</code> objects, in the most sensible way possible.`,name:"constraints"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_attentions</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_hidden_states</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_scores</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.return_dict_in_generate</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.forced_bos_token_id</code>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.forced_eos_token_id</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"forced_eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.remove_invalid_values",description:`<strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.remove_invalid_values</code>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method from crashing. Note that using <code>remove_invalid_values</code> can slow down generation.`,name:"remove_invalid_values"},{anchor:"transformers.generation_utils.GenerationMixin.generate.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)`,name:"synced_gpus"},{anchor:"transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty",description:`<strong>exponential_decay_length_penalty</strong> (<code>tuple(int, float)</code>, <em>optional</em>, defaults to <code>model.config.exponential_decay_length_penalty</code>) &#x2014; This tuple adds an exponentially increasing length penalty after a certain number of tokens have been generated. The tuple shall consist of: <code>(start_index, decay_factor)</code> where <code>start_index</code> indicates where the penalty starts and <code>decay_factor</code> represents the factor of exponential decay.`,name:"exponential_decay_length_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.suppress_tokens",description:`<strong>suppress_tokens</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.suppress_tokens</code>) &#x2014; A list of tokens that will be suppressed at generation. The <code>SupressTokens</code> logit processor will set their log probs to <code>-inf</code> so that they are not sampled.`,name:"suppress_tokens"},{anchor:"transformers.generation_utils.GenerationMixin.generate.begin_suppress_tokens",description:`<strong>begin_suppress_tokens</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.begin_suppress_tokens</code>) &#x2014; A list of tokens that will be suppressed at the beginning of the generation. The <code>SupressBeginTokens</code> logit processor will set their log probs to <code>-inf</code> so that they are not sampled.`,name:"begin_suppress_tokens"},{anchor:"transformers.generation_utils.GenerationMixin.generate.forced_decoder_ids",description:`<strong>forced_decoder_ids</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.forced_decoder_ids</code>) &#x2014; A list of tokens that will be forced as beginning tokens, before sampling.</p> <p>model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*.`,name:"forced_decoder_ids"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L914",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>torch.FloatTensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a></li> </ul> <p>If the model is an encoder-decoder model (<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a></li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></p> `}}),ot=new Jp({props:{warning:!0,$$slots:{default:[om]},$$scope:{ctx:L}}}),st=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.generate.example",$$slots:{default:[sm]},$$scope:{ctx:L}}}),at=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.generate.example-2",$$slots:{default:[am]},$$scope:{ctx:L}}}),rt=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.generate.example-3",$$slots:{default:[rm]},$$scope:{ctx:L}}}),Lt=new ce({props:{name:"greedy_search",anchor:"transformers.generation_utils.GenerationMixin.greedy_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = 
None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific keyword arguments will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L1637",returnDescription:` <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),it=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.example",$$slots:{default:[im]},$$scope:{ctx:L}}}),Tt=new ce({props:{name:"sample",anchor:"transformers.generation_utils.GenerationMixin.sample",parameters:[{name:"input_ids",val:": LongTensor"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"logits_warper",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = 
None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.sample.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.sample.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.sample.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.sample.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.sample.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.sample.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.sample.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.sample.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L1865",returnDescription:` <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),lt=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.sample.example",$$slots:{default:[lm]},$$scope:{ctx:L}}}),Ot=new ce({props:{name:"beam_search",anchor:"transformers.generation_utils.GenerationMixin.beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": 
typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2117",returnDescription:` <p><code>generation_utilsBeamSearchDecoderOnlyOutput</code>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),dt=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.beam_search.example",$$slots:{default:[dm]},$$scope:{ctx:L}}}),qt=new ce({props:{name:"beam_sample",anchor:"transformers.generation_utils.GenerationMixin.beam_sample",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"logits_warper",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = 
None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2426",returnDescription:` <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),ct=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.example",$$slots:{default:[cm]},$$scope:{ctx:L}}}),Ft=new ce({props:{name:"group_beam_search",anchor:"transformers.generation_utils.GenerationMixin.group_beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)</p> <p>model_kwargs &#x2014; Additional model specific kwargs that will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2742",returnDescription:` <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),pt=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.example",$$slots:{default:[pm]},$$scope:{ctx:L}}}),zt=new ce({props:{name:"constrained_beam_search",anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"constrained_beam_scorer",val:": ConstrainedBeamSearchScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = 
None"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer",description:`<strong>constrained_beam_scorer</strong> (<code>ConstrainedBeamSearchScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.ConstrainedBeamSearchScorer">ConstrainedBeamSearchScorer</a> should be read.`,name:"constrained_beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L3104",returnDescription:` <p><code>generation_utilsBeamSearchDecoderOnlyOutput</code>, <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),mt=new ge({props:{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.example",$$slots:{default:[mm]},$$scope:{ctx:L}}}),Dt=new Cs({}),Nt=new ce({props:{name:"class transformers.generation_tf_utils.TFGenerationMixin",anchor:"transformers.generation_tf_utils.TFGenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L351"}}),It=new ce({props:{name:"generate",anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate",parameters:[{name:"input_ids",val:" = None"},{name:"max_length",val:" = None"},{name:"max_new_tokens",val:" = None"},{name:"min_length",val:" = None"},{name:"do_sample",val:" = None"},{name:"early_stopping",val:" = None"},{name:"num_beams",val:" = None"},{name:"temperature",val:" = None"},{name:"top_k",val:" = None"},{name:"top_p",val:" = None"},{name:"repetition_penalty",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_scores",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict_in_generate",val:" = None"},{name:"forced_bos_token_id",val:" = None"},{name:"forced_eos_token_id",val:" = None"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids",description:"<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, `(batch_size, sequence_length, &#x2014;",name:"input_ids"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`",description:`<strong>feature_dim)\`</strong> or <code>(batch_size, num_channels, height, width)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. 
For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.`,name:"feature_dim)`"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>. In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.`,name:"max_length"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.max_new_tokens",description:`<strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.`,name:"max_new_tokens"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.`,name:"length_penalty"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch.`,name:"num_return_sequences(int,"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of <code>dtype=tf.int32</code> and shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens.</p> <p>If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.use_cache",description:`<strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"use_cache"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.`,name:"forced_eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L375",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>tf.Tensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><code>TFGreedySearchDecoderOnlyOutput</code>,</li> <li><code>TFSampleDecoderOnlyOutput</code>,</li> <li><code>TFBeamSearchDecoderOnlyOutput</code>,</li> <li><code>TFBeamSampleDecoderOnlyOutput</code></li> </ul> <p>If the model is an encoder-decoder model (<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><code>TFGreedySearchEncoderDecoderOutput</code>,</li> <li><code>TFSampleEncoderDecoderOutput</code>,</li> <li><code>TFBeamSearchEncoderDecoderOutput</code>,</li> <li><code>TFBeamSampleEncoderDecoderOutput</code></li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or <code>tf.Tensor</code></p> `}}),_t=new ge({props:{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.example",$$slots:{default:[gm]},$$scope:{ctx:L}}}),Ut=new Cs({}),Vt=new ce({props:{name:"class transformers.generation_flax_utils.FlaxGenerationMixin",anchor:"transformers.generation_flax_utils.FlaxGenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L125"}}),Xt=new ce({props:{name:"generate",anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate",parameters:[{name:"input_ids",val:": ndarray"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_new_tokens",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"bos_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"decoder_start_token_id",val:": typing.Optional[int] = None"},{name:"do_sample",val:": typing.Optional[bool] = None"},{name:"prng_key",val:": typing.Optional[jax._src.numpy.ndarray.ndarray] = None"},{name:"top_k",val:": typing.Optional[int] = None"},{name:"top_p",val:": typing.Optional[float] = None"},{name:"temperature",val:": typing.Optional[float] = None"},{name:"num_beams",val:": typing.Optional[int] = None"},{name:"no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"min_length",val:": typing.Optional[int] = None"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"length_penalty",val:": typing.Optional[float] = None"},{name:"early_stopping",val:": typing.Optional[bool] = None"},{name:"trace",val:": bool = True"},{name:"params",val:": typing.Union[typing.Dict[str, jax._src.numpy.ndarray.ndarray], NoneType] = 
None"},{name:"**model_kwargs",val:""}],parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>. In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.`,name:"max_length"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_new_tokens",description:`<strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.`,name:"max_new_tokens"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace",description:`<strong>trace</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to trace generation. Setting <code>trace=False</code> should only be used for debugging and will lead to a considerably slower runtime.`,name:"trace"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.params",description:`<strong>params</strong> (<code>Dict[str, jnp.ndarray]</code>, <em>optional</em>) &#x2014; Optionally the model parameters can be passed. Can be useful for parallelized generation. model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*. Also accepts <code>encoder_outputs</code> to skip encoder part.`,name:"params"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L211",returnDescription:` <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a>.</p> `}}),ht=new Jp({props:{warning:!0,$$slots:{default:[_m]},$$scope:{ctx:L}}}),ft=new ge({props:{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.example",$$slots:{default:[um]},$$scope:{ctx:L}}}),{c(){d=a("meta"),f=m(),_=a("h1"),c=a("a"),u=a("span"),k(s.$$.fragment),p=m(),C=a("span"),q=t("Generation"),I=m(),S=a("p"),z=t("Each framework has a generate method for auto-regressive text generation implemented in their respective "),$=a("code"),we=t("GenerationMixin"),Le=t(" class:"),he=m(),fe=a("ul"),$e=a("li"),Is=t("PyTorch "),tn=a("a"),Bs=t("generate()"),Ws=t(" is implemented in "),nn=a("a"),Hs=t("GenerationMixin"),Rs=t("."),Us=m(),Te=a("li"),Vs=t("TensorFlow "),on=a("a"),Ks=t("generate()"),Zs=t(" is implemented in "),sn=a("a"),Xs=t("TFGenerationMixin"),Js=t("."),Qs=m(),Ee=a("li"),Ys=t("Flax/JAX "),an=a("a"),ea=t("generate()"),ta=t(" is implemented in "),rn=a("a"),na=t("FlaxGenerationMixin"),oa=t("."),us=m(),Oe=a("h2"),nt=a("a"),zn=a("span"),k(xt.$$.fragment),sa=m(),Pn=a("span"),aa=t("GenerationMixin"),hs=m(),T=a("div"),k(kt.$$.fragment),ra=m(),vt=a("p"),ia=t("A class containing all functions for auto-regressive text generation, to be used as a mixin in "),ln=a("a"),la=t("PreTrainedModel"),da=t("."),ca=m(),yt=a("p"),pa=t("The class exposes "),dn=a("a"),ma=t("generate()"),ga=t(", which can be used for:"),_a=m(),P=a("ul"),B=a("li"),Dn=a("em"),ua=t("greedy decoding"),ha=t(" by calling "),cn=a("a"),fa=t("greedy_search()"),ba=t(" if "),Nn=a("code"),xa=t("num_beams=1"),ka=t(` and `),Cn=a("code"),va=t("do_sample=False"),ya=t("."),ja=m(),W=a("li"),In=a("em"),Ma=t("multinomial sampling"),wa=t(" by calling "),pn=a("a"),La=t("sample()"),$a=t(" if "),Bn=a("code"),Ta=t("num_beams=1"),Ea=t(` and `),Wn=a("code"),Oa=t("do_sample=True"),Ga=t("."),qa=m(),H=a("li"),Hn=a("em"),Sa=t("beam-search decoding"),Fa=t(" by calling "),mn=a("a"),Aa=t("beam_search()"),za=t(" if 
"),Rn=a("code"),Pa=t("num_beams>1"),Da=t(` and `),Un=a("code"),Na=t("do_sample=False"),Ca=t("."),Ia=m(),R=a("li"),Vn=a("em"),Ba=t("beam-search multinomial sampling"),Wa=t(" by calling "),gn=a("a"),Ha=t("beam_sample()"),Ra=t(` if `),Kn=a("code"),Ua=t("num_beams>1"),Va=t(" and "),Zn=a("code"),Ka=t("do_sample=True"),Za=t("."),Xa=m(),U=a("li"),Xn=a("em"),Ja=t("diverse beam-search decoding"),Qa=t(" by calling "),_n=a("a"),Ya=t("group_beam_search()"),er=t(`, if `),Jn=a("code"),tr=t("num_beams>1"),nr=t(" and "),Qn=a("code"),or=t("num_beam_groups>1"),sr=t("."),ar=m(),V=a("li"),Yn=a("em"),rr=t("constrained beam-search decoding"),ir=t(" by calling "),un=a("a"),lr=t("constrained_beam_search()"),dr=t(`, if `),eo=a("code"),cr=t("constraints!=None"),pr=t(" or "),to=a("code"),mr=t("force_words_ids!=None"),gr=t("."),_r=m(),E=a("div"),k(jt.$$.fragment),ur=m(),no=a("p"),hr=t(`Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),fr=m(),D=a("ul"),K=a("li"),oo=a("em"),br=t("greedy decoding"),xr=t(" by calling "),hn=a("a"),kr=t("greedy_search()"),vr=t(" if "),so=a("code"),yr=t("num_beams=1"),jr=t(` and `),ao=a("code"),Mr=t("do_sample=False"),wr=t("."),Lr=m(),Z=a("li"),ro=a("em"),$r=t("multinomial sampling"),Tr=t(" by calling "),fn=a("a"),Er=t("sample()"),Or=t(" if "),io=a("code"),Gr=t("num_beams=1"),qr=t(` and `),lo=a("code"),Sr=t("do_sample=True"),Fr=t("."),Ar=m(),X=a("li"),co=a("em"),zr=t("beam-search decoding"),Pr=t(" by calling "),bn=a("a"),Dr=t("beam_search()"),Nr=t(" if "),po=a("code"),Cr=t("num_beams>1"),Ir=t(` and `),mo=a("code"),Br=t("do_sample=False"),Wr=t("."),Hr=m(),J=a("li"),go=a("em"),Rr=t("beam-search multinomial sampling"),Ur=t(" by calling "),xn=a("a"),Vr=t("beam_sample()"),Kr=t(` if `),_o=a("code"),Zr=t("num_beams>1"),Xr=t(" and "),uo=a("code"),Jr=t("do_sample=True"),Qr=t("."),Yr=m(),Q=a("li"),ho=a("em"),ei=t("diverse beam-search decoding"),ti=t(" by calling "),kn=a("a"),ni=t("group_beam_search()"),oi=t(`, if `),fo=a("code"),si=t("num_beams>1"),ai=t(" and "),bo=a("code"),ri=t("num_beam_groups>1"),ii=t("."),li=m(),Y=a("li"),xo=a("em"),di=t("constrained beam-search decoding"),ci=t(` by calling `),vn=a("a"),pi=t("constrained_beam_search()"),mi=t(", if "),ko=a("code"),gi=t("constraints!=None"),_i=t(` or `),vo=a("code"),ui=t("force_words_ids!=None"),hi=t("."),fi=m(),k(ot.$$.fragment),bi=m(),Mt=a("p"),xi=t("Most of these parameters are explained in more detail in "),wt=a("a"),ki=t(`this blog post`),vi=t("."),yi=m(),yo=a("p"),ji=t("Examples:"),Mi=m(),k(st.$$.fragment),wi=m(),k(at.$$.fragment),Li=m(),k(rt.$$.fragment),$i=m(),be=a("div"),k(Lt.$$.fragment),Ti=m(),$t=a("p"),Ei=t("Generates sequences of token ids for models with a language modeling head using "),jo=a("strong"),Oi=t("greedy decoding"),Gi=t(` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),qi=m(),k(it.$$.fragment),Si=m(),xe=a("div"),k(Tt.$$.fragment),Fi=m(),Et=a("p"),Ai=t("Generates sequences of token ids for models with a language modeling head using "),Mo=a("strong"),zi=t("multinomial sampling"),Pi=t(` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Di=m(),k(lt.$$.fragment),Ni=m(),ke=a("div"),k(Ot.$$.fragment),Ci=m(),Gt=a("p"),Ii=t("Generates sequences of token ids for models with a language modeling head using "),wo=a("strong"),Bi=t("beam search decoding"),Wi=t(` and can be used for text-decoder, 
text-to-text, speech-to-text, and vision-to-text models.`),Hi=m(),k(dt.$$.fragment),Ri=m(),ve=a("div"),k(qt.$$.fragment),Ui=m(),St=a("p"),Vi=t("Generates sequences of token ids for models with a language modeling head using "),Lo=a("strong"),Ki=t(`beam search multinomial sampling`),Zi=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Xi=m(),k(ct.$$.fragment),Ji=m(),ye=a("div"),k(Ft.$$.fragment),Qi=m(),At=a("p"),Yi=t("Generates sequences of token ids for models with a language modeling head using "),$o=a("strong"),el=t(`diverse beam search decoding`),tl=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),nl=m(),k(pt.$$.fragment),ol=m(),je=a("div"),k(zt.$$.fragment),sl=m(),Pt=a("p"),al=t("Generates sequences of token ids for models with a language modeling head using "),To=a("strong"),rl=t(`constrained beam search decoding`),il=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),ll=m(),k(mt.$$.fragment),fs=m(),Ge=a("h2"),gt=a("a"),Eo=a("span"),k(Dt.$$.fragment),dl=m(),Oo=a("span"),cl=t("TFGenerationMixin"),bs=m(),pe=a("div"),k(Nt.$$.fragment),pl=m(),Ct=a("p"),ml=t("A class containing all of the functions supporting generation, to be used as a mixin in "),yn=a("a"),gl=t("TFPreTrainedModel"),_l=t("."),ul=m(),F=a("div"),k(It.$$.fragment),hl=m(),Go=a("p"),fl=t(`Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.`),bl=m(),Bt=a("p"),xl=t("Adapted in part from "),Wt=a("a"),kl=t(`Facebook\u2019s XLM beam search code`),vl=t("."),yl=m(),me=a("p"),jl=t("Apart from "),qo=a("code"),Ml=t("input_ids"),wl=t(" and "),So=a("code"),Ll=t("attention_mask"),$l=t(`, all the arguments below will default to the value of the attribute of the same name inside the `),jn=a("a"),Tl=t("PretrainedConfig"),El=t(` of the model. The default values indicated are the default values of those config.`),Ol=m(),Ht=a("p"),Gl=t("Most of these parameters are explained in more detail in "),Rt=a("a"),ql=t(`this blog post`),Sl=t("."),Fl=m(),k(_t.$$.fragment),xs=m(),qe=a("h2"),ut=a("a"),Fo=a("span"),k(Ut.$$.fragment),Al=m(),Ao=a("span"),zl=t("FlaxGenerationMixin"),ks=m(),N=a("div"),k(Vt.$$.fragment),Pl=m(),Kt=a("p"),Dl=t(`A class containing all functions for auto-regressive text generation, to be used as a mixin in `),Mn=a("a"),Nl=t("FlaxPreTrainedModel"),Cl=t("."),Il=m(),Zt=a("p"),Bl=t("The class exposes "),wn=a("a"),Wl=t("generate()"),Hl=t(", which can be used for:"),Rl=m(),Se=a("ul"),ee=a("li"),zo=a("em"),Ul=t("greedy decoding"),Vl=t(" by calling "),Po=a("code"),Kl=t("_greedy_search()"),Zl=t(` if `),Do=a("code"),Xl=t("num_beams=1"),Jl=t(" and "),No=a("code"),Ql=t("do_sample=False"),Yl=t("."),ed=m(),te=a("li"),Co=a("em"),td=t("multinomial sampling"),nd=t(" by calling "),Io=a("code"),od=t("_sample()"),sd=t(" if "),Bo=a("code"),ad=t("num_beams=1"),rd=t(` and `),Wo=a("code"),id=t("do_sample=True"),ld=t("."),dd=m(),ne=a("li"),Ho=a("em"),cd=t("beam-search decoding"),pd=t(" by calling "),Ro=a("code"),md=t("~generation_utils.FlaxGenerationMixin._beam_search"),gd=t(" if "),Uo=a("code"),_d=t("num_beams>1"),ud=t(` and `),Vo=a("code"),hd=t("do_sample=False"),fd=t("."),bd=m(),A=a("div"),k(Xt.$$.fragment),xd=m(),Ko=a("p"),kd=t(`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),vd=m(),Fe=a("ul"),oe=a("li"),Zo=a("em"),yd=t("greedy decoding"),jd=t(" by calling "),Xo=a("code"),Md=t("_greedy_search()"),wd=t(` if `),Jo=a("code"),Ld=t("num_beams=1"),$d=t(" and "),Qo=a("code"),Td=t("do_sample=False"),Ed=t("."),Od=m(),se=a("li"),Yo=a("em"),Gd=t("multinomial sampling"),qd=t(" by calling "),es=a("code"),Sd=t("_sample()"),Fd=t(" if "),ts=a("code"),Ad=t("num_beams=1"),zd=t(` and `),ns=a("code"),Pd=t("do_sample=True"),Dd=t("."),Nd=m(),ae=a("li"),os=a("em"),Cd=t("beam-search decoding"),Id=t(" by calling "),ss=a("code"),Bd=t("~generation_utils.FlaxGenerationMixin._beam_search"),Wd=t(" if "),as=a("code"),Hd=t("num_beams>1"),Rd=t(` and `),rs=a("code"),Ud=t("do_sample=False"),Vd=t("."),Kd=m(),k(ht.$$.fragment),Zd=m(),Jt=a("p"),Xd=t("Most of these parameters are explained in more detail in "),Qt=a("a"),Jd=t(`this blog post`),Qd=t("."),Yd=m(),k(ft.$$.fragment),this.h()},l(l){const b=tm('[data-svelte="svelte-1phssyn"]',document.head);d=r(b,"META",{name:!0,content:!0}),b.forEach(o),f=g(l),_=r(l,"H1",{class:!0});var Yt=i(_);c=r(Yt,"A",{id:!0,class:!0,href:!0});var is=i(c);u=r(is,"SPAN",{});var ls=i(u);v(s.$$.fragment,ls),ls.forEach(o),is.forEach(o),p=g(Yt),C=r(Yt,"SPAN",{});var ds=i(C);q=n(ds,"Generation"),ds.forEach(o),Yt.forEach(o),I=g(l),S=r(l,"P",{});var en=i(S);z=n(en,"Each framework has a generate method for auto-regressive text generation implemented in their respective "),$=r(en,"CODE",{});var cs=i($);we=n(cs,"GenerationMixin"),cs.forEach(o),Le=n(en," class:"),en.forEach(o),he=g(l),fe=r(l,"UL",{});var Ae=i(fe);$e=r(Ae,"LI",{});var ze=i($e);Is=n(ze,"PyTorch "),tn=r(ze,"A",{href:!0});var ps=i(tn);Bs=n(ps,"generate()"),ps.forEach(o),Ws=n(ze," is implemented in "),nn=r(ze,"A",{href:!0});var ms=i(nn);Hs=n(ms,"GenerationMixin"),ms.forEach(o),Rs=n(ze,"."),ze.forEach(o),Us=g(Ae),Te=r(Ae,"LI",{});var Pe=i(Te);Vs=n(Pe,"TensorFlow "),on=r(Pe,"A",{href:!0});var gs=i(on);Ks=n(gs,"generate()"),gs.forEach(o),Zs=n(Pe," is implemented in "),sn=r(Pe,"A",{href:!0});var _s=i(sn);Xs=n(_s,"TFGenerationMixin"),_s.forEach(o),Js=n(Pe,"."),Pe.forEach(o),Qs=g(Ae),Ee=r(Ae,"LI",{});var Ln=i(Ee);Ys=n(Ln,"Flax/JAX "),an=r(Ln,"A",{href:!0});var ec=i(an);ea=n(ec,"generate()"),ec.forEach(o),ta=n(Ln," is implemented in "),rn=r(Ln,"A",{href:!0});var tc=i(rn);na=n(tc,"FlaxGenerationMixin"),tc.forEach(o),oa=n(Ln,"."),Ln.forEach(o),Ae.forEach(o),us=g(l),Oe=r(l,"H2",{class:!0});var ys=i(Oe);nt=r(ys,"A",{id:!0,class:!0,href:!0});var nc=i(nt);zn=r(nc,"SPAN",{});var oc=i(zn);v(xt.$$.fragment,oc),oc.forEach(o),nc.forEach(o),sa=g(ys),Pn=r(ys,"SPAN",{});var sc=i(Pn);aa=n(sc,"GenerationMixin"),sc.forEach(o),ys.forEach(o),hs=g(l),T=r(l,"DIV",{class:!0});var O=i(T);v(kt.$$.fragment,O),ra=g(O),vt=r(O,"P",{});var js=i(vt);ia=n(js,"A class containing all functions for auto-regressive text generation, to be used as a mixin in "),ln=r(js,"A",{href:!0});var ac=i(ln);la=n(ac,"PreTrainedModel"),ac.forEach(o),da=n(js,"."),js.forEach(o),ca=g(O),yt=r(O,"P",{});var Ms=i(yt);pa=n(Ms,"The class exposes "),dn=r(Ms,"A",{href:!0});var rc=i(dn);ma=n(rc,"generate()"),rc.forEach(o),ga=n(Ms,", which can be used for:"),Ms.forEach(o),_a=g(O),P=r(O,"UL",{});var re=i(P);B=r(re,"LI",{});var De=i(B);Dn=r(De,"EM",{});var ic=i(Dn);ua=n(ic,"greedy decoding"),ic.forEach(o),ha=n(De," by calling "),cn=r(De,"A",{href:!0});var lc=i(cn);fa=n(lc,"greedy_search()"),lc.forEach(o),ba=n(De," if "),Nn=r(De,"CODE",{});var 
dc=i(Nn);xa=n(dc,"num_beams=1"),dc.forEach(o),ka=n(De,` and `),Cn=r(De,"CODE",{});var cc=i(Cn);va=n(cc,"do_sample=False"),cc.forEach(o),ya=n(De,"."),De.forEach(o),ja=g(re),W=r(re,"LI",{});var Ne=i(W);In=r(Ne,"EM",{});var pc=i(In);Ma=n(pc,"multinomial sampling"),pc.forEach(o),wa=n(Ne," by calling "),pn=r(Ne,"A",{href:!0});var mc=i(pn);La=n(mc,"sample()"),mc.forEach(o),$a=n(Ne," if "),Bn=r(Ne,"CODE",{});var gc=i(Bn);Ta=n(gc,"num_beams=1"),gc.forEach(o),Ea=n(Ne,` and `),Wn=r(Ne,"CODE",{});var _c=i(Wn);Oa=n(_c,"do_sample=True"),_c.forEach(o),Ga=n(Ne,"."),Ne.forEach(o),qa=g(re),H=r(re,"LI",{});var Ce=i(H);Hn=r(Ce,"EM",{});var uc=i(Hn);Sa=n(uc,"beam-search decoding"),uc.forEach(o),Fa=n(Ce," by calling "),mn=r(Ce,"A",{href:!0});var hc=i(mn);Aa=n(hc,"beam_search()"),hc.forEach(o),za=n(Ce," if "),Rn=r(Ce,"CODE",{});var fc=i(Rn);Pa=n(fc,"num_beams>1"),fc.forEach(o),Da=n(Ce,` and `),Un=r(Ce,"CODE",{});var bc=i(Un);Na=n(bc,"do_sample=False"),bc.forEach(o),Ca=n(Ce,"."),Ce.forEach(o),Ia=g(re),R=r(re,"LI",{});var Ie=i(R);Vn=r(Ie,"EM",{});var xc=i(Vn);Ba=n(xc,"beam-search multinomial sampling"),xc.forEach(o),Wa=n(Ie," by calling "),gn=r(Ie,"A",{href:!0});var kc=i(gn);Ha=n(kc,"beam_sample()"),kc.forEach(o),Ra=n(Ie,` if `),Kn=r(Ie,"CODE",{});var vc=i(Kn);Ua=n(vc,"num_beams>1"),vc.forEach(o),Va=n(Ie," and "),Zn=r(Ie,"CODE",{});var yc=i(Zn);Ka=n(yc,"do_sample=True"),yc.forEach(o),Za=n(Ie,"."),Ie.forEach(o),Xa=g(re),U=r(re,"LI",{});var Be=i(U);Xn=r(Be,"EM",{});var jc=i(Xn);Ja=n(jc,"diverse beam-search decoding"),jc.forEach(o),Qa=n(Be," by calling "),_n=r(Be,"A",{href:!0});var Mc=i(_n);Ya=n(Mc,"group_beam_search()"),Mc.forEach(o),er=n(Be,`, if `),Jn=r(Be,"CODE",{});var wc=i(Jn);tr=n(wc,"num_beams>1"),wc.forEach(o),nr=n(Be," and "),Qn=r(Be,"CODE",{});var Lc=i(Qn);or=n(Lc,"num_beam_groups>1"),Lc.forEach(o),sr=n(Be,"."),Be.forEach(o),ar=g(re),V=r(re,"LI",{});var We=i(V);Yn=r(We,"EM",{});var $c=i(Yn);rr=n($c,"constrained beam-search decoding"),$c.forEach(o),ir=n(We," by calling "),un=r(We,"A",{href:!0});var Tc=i(un);lr=n(Tc,"constrained_beam_search()"),Tc.forEach(o),dr=n(We,`, if `),eo=r(We,"CODE",{});var Ec=i(eo);cr=n(Ec,"constraints!=None"),Ec.forEach(o),pr=n(We," or "),to=r(We,"CODE",{});var Oc=i(to);mr=n(Oc,"force_words_ids!=None"),Oc.forEach(o),gr=n(We,"."),We.forEach(o),re.forEach(o),_r=g(O),E=r(O,"DIV",{class:!0});var G=i(E);v(jt.$$.fragment,G),ur=g(G),no=r(G,"P",{});var Gc=i(no);hr=n(Gc,`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),Gc.forEach(o),fr=g(G),D=r(G,"UL",{});var ie=i(D);K=r(ie,"LI",{});var He=i(K);oo=r(He,"EM",{});var qc=i(oo);br=n(qc,"greedy decoding"),qc.forEach(o),xr=n(He," by calling "),hn=r(He,"A",{href:!0});var Sc=i(hn);kr=n(Sc,"greedy_search()"),Sc.forEach(o),vr=n(He," if "),so=r(He,"CODE",{});var Fc=i(so);yr=n(Fc,"num_beams=1"),Fc.forEach(o),jr=n(He,` and `),ao=r(He,"CODE",{});var Ac=i(ao);Mr=n(Ac,"do_sample=False"),Ac.forEach(o),wr=n(He,"."),He.forEach(o),Lr=g(ie),Z=r(ie,"LI",{});var Re=i(Z);ro=r(Re,"EM",{});var zc=i(ro);$r=n(zc,"multinomial sampling"),zc.forEach(o),Tr=n(Re," by calling "),fn=r(Re,"A",{href:!0});var Pc=i(fn);Er=n(Pc,"sample()"),Pc.forEach(o),Or=n(Re," if "),io=r(Re,"CODE",{});var Dc=i(io);Gr=n(Dc,"num_beams=1"),Dc.forEach(o),qr=n(Re,` and `),lo=r(Re,"CODE",{});var Nc=i(lo);Sr=n(Nc,"do_sample=True"),Nc.forEach(o),Fr=n(Re,"."),Re.forEach(o),Ar=g(ie),X=r(ie,"LI",{});var Ue=i(X);co=r(Ue,"EM",{});var Cc=i(co);zr=n(Cc,"beam-search decoding"),Cc.forEach(o),Pr=n(Ue," by calling "),bn=r(Ue,"A",{href:!0});var Ic=i(bn);Dr=n(Ic,"beam_search()"),Ic.forEach(o),Nr=n(Ue," if "),po=r(Ue,"CODE",{});var Bc=i(po);Cr=n(Bc,"num_beams>1"),Bc.forEach(o),Ir=n(Ue,` and `),mo=r(Ue,"CODE",{});var Wc=i(mo);Br=n(Wc,"do_sample=False"),Wc.forEach(o),Wr=n(Ue,"."),Ue.forEach(o),Hr=g(ie),J=r(ie,"LI",{});var Ve=i(J);go=r(Ve,"EM",{});var Hc=i(go);Rr=n(Hc,"beam-search multinomial sampling"),Hc.forEach(o),Ur=n(Ve," by calling "),xn=r(Ve,"A",{href:!0});var Rc=i(xn);Vr=n(Rc,"beam_sample()"),Rc.forEach(o),Kr=n(Ve,` if `),_o=r(Ve,"CODE",{});var Uc=i(_o);Zr=n(Uc,"num_beams>1"),Uc.forEach(o),Xr=n(Ve," and "),uo=r(Ve,"CODE",{});var Vc=i(uo);Jr=n(Vc,"do_sample=True"),Vc.forEach(o),Qr=n(Ve,"."),Ve.forEach(o),Yr=g(ie),Q=r(ie,"LI",{});var Ke=i(Q);ho=r(Ke,"EM",{});var Kc=i(ho);ei=n(Kc,"diverse beam-search decoding"),Kc.forEach(o),ti=n(Ke," by calling "),kn=r(Ke,"A",{href:!0});var Zc=i(kn);ni=n(Zc,"group_beam_search()"),Zc.forEach(o),oi=n(Ke,`, if `),fo=r(Ke,"CODE",{});var Xc=i(fo);si=n(Xc,"num_beams>1"),Xc.forEach(o),ai=n(Ke," and "),bo=r(Ke,"CODE",{});var Jc=i(bo);ri=n(Jc,"num_beam_groups>1"),Jc.forEach(o),ii=n(Ke,"."),Ke.forEach(o),li=g(ie),Y=r(ie,"LI",{});var Ze=i(Y);xo=r(Ze,"EM",{});var Qc=i(xo);di=n(Qc,"constrained beam-search decoding"),Qc.forEach(o),ci=n(Ze,` by calling `),vn=r(Ze,"A",{href:!0});var Yc=i(vn);pi=n(Yc,"constrained_beam_search()"),Yc.forEach(o),mi=n(Ze,", if "),ko=r(Ze,"CODE",{});var ep=i(ko);gi=n(ep,"constraints!=None"),ep.forEach(o),_i=n(Ze,` or `),vo=r(Ze,"CODE",{});var tp=i(vo);ui=n(tp,"force_words_ids!=None"),tp.forEach(o),hi=n(Ze,"."),Ze.forEach(o),ie.forEach(o),fi=g(G),v(ot.$$.fragment,G),bi=g(G),Mt=r(G,"P",{});var ws=i(Mt);xi=n(ws,"Most of these parameters are explained in more detail in "),wt=r(ws,"A",{href:!0,rel:!0});var np=i(wt);ki=n(np,`this blog post`),np.forEach(o),vi=n(ws,"."),ws.forEach(o),yi=g(G),yo=r(G,"P",{});var op=i(yo);ji=n(op,"Examples:"),op.forEach(o),Mi=g(G),v(st.$$.fragment,G),wi=g(G),v(at.$$.fragment,G),Li=g(G),v(rt.$$.fragment,G),G.forEach(o),$i=g(O),be=r(O,"DIV",{class:!0});var $n=i(be);v(Lt.$$.fragment,$n),Ti=g($n),$t=r($n,"P",{});var Ls=i($t);Ei=n(Ls,"Generates sequences of token ids for models with a language modeling head using "),jo=r(Ls,"STRONG",{});var sp=i(jo);Oi=n(sp,"greedy decoding"),sp.forEach(o),Gi=n(Ls,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text 
models.`),Ls.forEach(o),qi=g($n),v(it.$$.fragment,$n),$n.forEach(o),Si=g(O),xe=r(O,"DIV",{class:!0});var Tn=i(xe);v(Tt.$$.fragment,Tn),Fi=g(Tn),Et=r(Tn,"P",{});var $s=i(Et);Ai=n($s,"Generates sequences of token ids for models with a language modeling head using "),Mo=r($s,"STRONG",{});var ap=i(Mo);zi=n(ap,"multinomial sampling"),ap.forEach(o),Pi=n($s,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),$s.forEach(o),Di=g(Tn),v(lt.$$.fragment,Tn),Tn.forEach(o),Ni=g(O),ke=r(O,"DIV",{class:!0});var En=i(ke);v(Ot.$$.fragment,En),Ci=g(En),Gt=r(En,"P",{});var Ts=i(Gt);Ii=n(Ts,"Generates sequences of token ids for models with a language modeling head using "),wo=r(Ts,"STRONG",{});var rp=i(wo);Bi=n(rp,"beam search decoding"),rp.forEach(o),Wi=n(Ts,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Ts.forEach(o),Hi=g(En),v(dt.$$.fragment,En),En.forEach(o),Ri=g(O),ve=r(O,"DIV",{class:!0});var On=i(ve);v(qt.$$.fragment,On),Ui=g(On),St=r(On,"P",{});var Es=i(St);Vi=n(Es,"Generates sequences of token ids for models with a language modeling head using "),Lo=r(Es,"STRONG",{});var ip=i(Lo);Ki=n(ip,`beam search multinomial sampling`),ip.forEach(o),Zi=n(Es," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Es.forEach(o),Xi=g(On),v(ct.$$.fragment,On),On.forEach(o),Ji=g(O),ye=r(O,"DIV",{class:!0});var Gn=i(ye);v(Ft.$$.fragment,Gn),Qi=g(Gn),At=r(Gn,"P",{});var Os=i(At);Yi=n(Os,"Generates sequences of token ids for models with a language modeling head using "),$o=r(Os,"STRONG",{});var lp=i($o);el=n(lp,`diverse beam search decoding`),lp.forEach(o),tl=n(Os," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Os.forEach(o),nl=g(Gn),v(pt.$$.fragment,Gn),Gn.forEach(o),ol=g(O),je=r(O,"DIV",{class:!0});var qn=i(je);v(zt.$$.fragment,qn),sl=g(qn),Pt=r(qn,"P",{});var Gs=i(Pt);al=n(Gs,"Generates sequences of token ids for models with a language modeling head using "),To=r(Gs,"STRONG",{});var dp=i(To);rl=n(dp,`constrained beam search decoding`),dp.forEach(o),il=n(Gs," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Gs.forEach(o),ll=g(qn),v(mt.$$.fragment,qn),qn.forEach(o),O.forEach(o),fs=g(l),Ge=r(l,"H2",{class:!0});var qs=i(Ge);gt=r(qs,"A",{id:!0,class:!0,href:!0});var cp=i(gt);Eo=r(cp,"SPAN",{});var pp=i(Eo);v(Dt.$$.fragment,pp),pp.forEach(o),cp.forEach(o),dl=g(qs),Oo=r(qs,"SPAN",{});var mp=i(Oo);cl=n(mp,"TFGenerationMixin"),mp.forEach(o),qs.forEach(o),bs=g(l),pe=r(l,"DIV",{class:!0});var Sn=i(pe);v(Nt.$$.fragment,Sn),pl=g(Sn),Ct=r(Sn,"P",{});var Ss=i(Ct);ml=n(Ss,"A class containing all of the functions supporting generation, to be used as a mixin in "),yn=r(Ss,"A",{href:!0});var gp=i(yn);gl=n(gp,"TFPreTrainedModel"),gp.forEach(o),_l=n(Ss,"."),Ss.forEach(o),ul=g(Sn),F=r(Sn,"DIV",{class:!0});var le=i(F);v(It.$$.fragment,le),hl=g(le),Go=r(le,"P",{});var _p=i(Go);fl=n(_p,`Generates sequences for models with a language modeling head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.`),_p.forEach(o),bl=g(le),Bt=r(le,"P",{});var Fs=i(Bt);xl=n(Fs,"Adapted in part from "),Wt=r(Fs,"A",{href:!0,rel:!0});var up=i(Wt);kl=n(up,`Facebook\u2019s XLM beam search code`),up.forEach(o),vl=n(Fs,"."),Fs.forEach(o),yl=g(le),me=r(le,"P",{});var bt=i(me);jl=n(bt,"Apart from "),qo=r(bt,"CODE",{});var hp=i(qo);Ml=n(hp,"input_ids"),hp.forEach(o),wl=n(bt," and "),So=r(bt,"CODE",{});var fp=i(So);Ll=n(fp,"attention_mask"),fp.forEach(o),$l=n(bt,`, all the arguments below will default to the value of the attribute of the same name inside the `),jn=r(bt,"A",{href:!0});var bp=i(jn);Tl=n(bp,"PretrainedConfig"),bp.forEach(o),El=n(bt,` of the model. The default values indicated are the default values of those config.`),bt.forEach(o),Ol=g(le),Ht=r(le,"P",{});var As=i(Ht);Gl=n(As,"Most of these parameters are explained in more detail in "),Rt=r(As,"A",{href:!0,rel:!0});var xp=i(Rt);ql=n(xp,`this blog post`),xp.forEach(o),Sl=n(As,"."),As.forEach(o),Fl=g(le),v(_t.$$.fragment,le),le.forEach(o),Sn.forEach(o),xs=g(l),qe=r(l,"H2",{class:!0});var zs=i(qe);ut=r(zs,"A",{id:!0,class:!0,href:!0});var kp=i(ut);Fo=r(kp,"SPAN",{});var vp=i(Fo);v(Ut.$$.fragment,vp),vp.forEach(o),kp.forEach(o),Al=g(zs),Ao=r(zs,"SPAN",{});var yp=i(Ao);zl=n(yp,"FlaxGenerationMixin"),yp.forEach(o),zs.forEach(o),ks=g(l),N=r(l,"DIV",{class:!0});var Me=i(N);v(Vt.$$.fragment,Me),Pl=g(Me),Kt=r(Me,"P",{});var Ps=i(Kt);Dl=n(Ps,`A class containing all functions for auto-regressive text generation, to be used as a mixin in `),Mn=r(Ps,"A",{href:!0});var jp=i(Mn);Nl=n(jp,"FlaxPreTrainedModel"),jp.forEach(o),Cl=n(Ps,"."),Ps.forEach(o),Il=g(Me),Zt=r(Me,"P",{});var Ds=i(Zt);Bl=n(Ds,"The class exposes "),wn=r(Ds,"A",{href:!0});var Mp=i(wn);Wl=n(Mp,"generate()"),Mp.forEach(o),Hl=n(Ds,", which can be used for:"),Ds.forEach(o),Rl=g(Me),Se=r(Me,"UL",{});var Fn=i(Se);ee=r(Fn,"LI",{});var Xe=i(ee);zo=r(Xe,"EM",{});var wp=i(zo);Ul=n(wp,"greedy decoding"),wp.forEach(o),Vl=n(Xe," by calling "),Po=r(Xe,"CODE",{});var Lp=i(Po);Kl=n(Lp,"_greedy_search()"),Lp.forEach(o),Zl=n(Xe,` if `),Do=r(Xe,"CODE",{});var $p=i(Do);Xl=n($p,"num_beams=1"),$p.forEach(o),Jl=n(Xe," and "),No=r(Xe,"CODE",{});var Tp=i(No);Ql=n(Tp,"do_sample=False"),Tp.forEach(o),Yl=n(Xe,"."),Xe.forEach(o),ed=g(Fn),te=r(Fn,"LI",{});var Je=i(te);Co=r(Je,"EM",{});var Ep=i(Co);td=n(Ep,"multinomial sampling"),Ep.forEach(o),nd=n(Je," by calling "),Io=r(Je,"CODE",{});var Op=i(Io);od=n(Op,"_sample()"),Op.forEach(o),sd=n(Je," if "),Bo=r(Je,"CODE",{});var Gp=i(Bo);ad=n(Gp,"num_beams=1"),Gp.forEach(o),rd=n(Je,` and `),Wo=r(Je,"CODE",{});var qp=i(Wo);id=n(qp,"do_sample=True"),qp.forEach(o),ld=n(Je,"."),Je.forEach(o),dd=g(Fn),ne=r(Fn,"LI",{});var Qe=i(ne);Ho=r(Qe,"EM",{});var Sp=i(Ho);cd=n(Sp,"beam-search decoding"),Sp.forEach(o),pd=n(Qe," by calling "),Ro=r(Qe,"CODE",{});var Fp=i(Ro);md=n(Fp,"~generation_utils.FlaxGenerationMixin._beam_search"),Fp.forEach(o),gd=n(Qe," if "),Uo=r(Qe,"CODE",{});var Ap=i(Uo);_d=n(Ap,"num_beams>1"),Ap.forEach(o),ud=n(Qe,` and `),Vo=r(Qe,"CODE",{});var zp=i(Vo);hd=n(zp,"do_sample=False"),zp.forEach(o),fd=n(Qe,"."),Qe.forEach(o),Fn.forEach(o),bd=g(Me),A=r(Me,"DIV",{class:!0});var de=i(A);v(Xt.$$.fragment,de),xd=g(de),Ko=r(de,"P",{});var Pp=i(Ko);kd=n(Pp,`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),Pp.forEach(o),vd=g(de),Fe=r(de,"UL",{});var An=i(Fe);oe=r(An,"LI",{});var Ye=i(oe);Zo=r(Ye,"EM",{});var Dp=i(Zo);yd=n(Dp,"greedy decoding"),Dp.forEach(o),jd=n(Ye," by calling "),Xo=r(Ye,"CODE",{});var Np=i(Xo);Md=n(Np,"_greedy_search()"),Np.forEach(o),wd=n(Ye,` if `),Jo=r(Ye,"CODE",{});var Cp=i(Jo);Ld=n(Cp,"num_beams=1"),Cp.forEach(o),$d=n(Ye," and "),Qo=r(Ye,"CODE",{});var Ip=i(Qo);Td=n(Ip,"do_sample=False"),Ip.forEach(o),Ed=n(Ye,"."),Ye.forEach(o),Od=g(An),se=r(An,"LI",{});var et=i(se);Yo=r(et,"EM",{});var Bp=i(Yo);Gd=n(Bp,"multinomial sampling"),Bp.forEach(o),qd=n(et," by calling "),es=r(et,"CODE",{});var Wp=i(es);Sd=n(Wp,"_sample()"),Wp.forEach(o),Fd=n(et," if "),ts=r(et,"CODE",{});var Hp=i(ts);Ad=n(Hp,"num_beams=1"),Hp.forEach(o),zd=n(et,` and `),ns=r(et,"CODE",{});var Rp=i(ns);Pd=n(Rp,"do_sample=True"),Rp.forEach(o),Dd=n(et,"."),et.forEach(o),Nd=g(An),ae=r(An,"LI",{});var tt=i(ae);os=r(tt,"EM",{});var Up=i(os);Cd=n(Up,"beam-search decoding"),Up.forEach(o),Id=n(tt," by calling "),ss=r(tt,"CODE",{});var Vp=i(ss);Bd=n(Vp,"~generation_utils.FlaxGenerationMixin._beam_search"),Vp.forEach(o),Wd=n(tt," if "),as=r(tt,"CODE",{});var Kp=i(as);Hd=n(Kp,"num_beams>1"),Kp.forEach(o),Rd=n(tt,` and `),rs=r(tt,"CODE",{});var Zp=i(rs);Ud=n(Zp,"do_sample=False"),Zp.forEach(o),Vd=n(tt,"."),tt.forEach(o),An.forEach(o),Kd=g(de),v(ht.$$.fragment,de),Zd=g(de),Jt=r(de,"P",{});var Ns=i(Jt);Xd=n(Ns,"Most of these parameters are explained in more detail in "),Qt=r(Ns,"A",{href:!0,rel:!0});var Xp=i(Qt);Jd=n(Xp,`this blog post`),Xp.forEach(o),Qd=n(Ns,"."),Ns.forEach(o),Yd=g(de),v(ft.$$.fragment,de),de.forEach(o),Me.forEach(o),this.h()},h(){h(d,"name","hf:doc:metadata"),h(d,"content",JSON.stringify(fm)),h(c,"id","generation"),h(c,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(c,"href","#generation"),h(_,"class","relative group"),h(tn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),h(nn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin"),h(on,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate"),h(sn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin"),h(an,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate"),h(rn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin"),h(nt,"id","transformers.generation_utils.GenerationMixin"),h(nt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(nt,"href","#transformers.generation_utils.GenerationMixin"),h(Oe,"class","relative 
group"),h(ln,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),h(dn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),h(cn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),h(pn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),h(mn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),h(gn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),h(_n,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),h(un,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),h(hn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),h(fn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),h(bn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),h(xn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),h(kn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),h(vn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),h(wt,"href","https://huggingface.co/blog/how-to-generate"),h(wt,"rel","nofollow"),h(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(gt,"id","transformers.generation_tf_utils.TFGenerationMixin"),h(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(gt,"href","#transformers.generation_tf_utils.TFGenerationMixin"),h(Ge,"class","relative group"),h(yn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),h(Wt,"href","https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529"),h(Wt,"rel","nofollow"),h(jn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),h(Rt,"href","https://huggingface.co/blog/how-to-generate"),h(Rt,"rel","nofollow"),h(F,"class","docstring border-l-2 
border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(ut,"id","transformers.generation_flax_utils.FlaxGenerationMixin"),h(ut,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ut,"href","#transformers.generation_flax_utils.FlaxGenerationMixin"),h(qe,"class","relative group"),h(Mn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),h(wn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate"),h(Qt,"href","https://huggingface.co/blog/how-to-generate"),h(Qt,"rel","nofollow"),h(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(l,b){e(document.head,d),x(l,f,b),x(l,_,b),e(_,c),e(c,u),y(s,u,null),e(_,p),e(_,C),e(C,q),x(l,I,b),x(l,S,b),e(S,z),e(S,$),e($,we),e(S,Le),x(l,he,b),x(l,fe,b),e(fe,$e),e($e,Is),e($e,tn),e(tn,Bs),e($e,Ws),e($e,nn),e(nn,Hs),e($e,Rs),e(fe,Us),e(fe,Te),e(Te,Vs),e(Te,on),e(on,Ks),e(Te,Zs),e(Te,sn),e(sn,Xs),e(Te,Js),e(fe,Qs),e(fe,Ee),e(Ee,Ys),e(Ee,an),e(an,ea),e(Ee,ta),e(Ee,rn),e(rn,na),e(Ee,oa),x(l,us,b),x(l,Oe,b),e(Oe,nt),e(nt,zn),y(xt,zn,null),e(Oe,sa),e(Oe,Pn),e(Pn,aa),x(l,hs,b),x(l,T,b),y(kt,T,null),e(T,ra),e(T,vt),e(vt,ia),e(vt,ln),e(ln,la),e(vt,da),e(T,ca),e(T,yt),e(yt,pa),e(yt,dn),e(dn,ma),e(yt,ga),e(T,_a),e(T,P),e(P,B),e(B,Dn),e(Dn,ua),e(B,ha),e(B,cn),e(cn,fa),e(B,ba),e(B,Nn),e(Nn,xa),e(B,ka),e(B,Cn),e(Cn,va),e(B,ya),e(P,ja),e(P,W),e(W,In),e(In,Ma),e(W,wa),e(W,pn),e(pn,La),e(W,$a),e(W,Bn),e(Bn,Ta),e(W,Ea),e(W,Wn),e(Wn,Oa),e(W,Ga),e(P,qa),e(P,H),e(H,Hn),e(Hn,Sa),e(H,Fa),e(H,mn),e(mn,Aa),e(H,za),e(H,Rn),e(Rn,Pa),e(H,Da),e(H,Un),e(Un,Na),e(H,Ca),e(P,Ia),e(P,R),e(R,Vn),e(Vn,Ba),e(R,Wa),e(R,gn),e(gn,Ha),e(R,Ra),e(R,Kn),e(Kn,Ua),e(R,Va),e(R,Zn),e(Zn,Ka),e(R,Za),e(P,Xa),e(P,U),e(U,Xn),e(Xn,Ja),e(U,Qa),e(U,_n),e(_n,Ya),e(U,er),e(U,Jn),e(Jn,tr),e(U,nr),e(U,Qn),e(Qn,or),e(U,sr),e(P,ar),e(P,V),e(V,Yn),e(Yn,rr),e(V,ir),e(V,un),e(un,lr),e(V,dr),e(V,eo),e(eo,cr),e(V,pr),e(V,to),e(to,mr),e(V,gr),e(T,_r),e(T,E),y(jt,E,null),e(E,ur),e(E,no),e(no,hr),e(E,fr),e(E,D),e(D,K),e(K,oo),e(oo,br),e(K,xr),e(K,hn),e(hn,kr),e(K,vr),e(K,so),e(so,yr),e(K,jr),e(K,ao),e(ao,Mr),e(K,wr),e(D,Lr),e(D,Z),e(Z,ro),e(ro,$r),e(Z,Tr),e(Z,fn),e(fn,Er),e(Z,Or),e(Z,io),e(io,Gr),e(Z,qr),e(Z,lo),e(lo,Sr),e(Z,Fr),e(D,Ar),e(D,X),e(X,co),e(co,zr),e(X,Pr),e(X,bn),e(bn,Dr),e(X,Nr),e(X,po),e(po,Cr),e(X,Ir),e(X,mo),e(mo,Br),e(X,Wr),e(D,Hr),e(D,J),e(J,go),e(go,Rr),e(J,Ur),e(J,xn),e(xn,Vr),e(J,Kr),e(J,_o),e(_o,Zr),e(J,Xr),e(J,uo),e(uo,Jr),e(J,Qr),e(D,Yr),e(D,Q),e(Q,ho),e(ho,ei),e(Q,ti),e(Q,kn),e(kn,ni),e(Q,oi),e(Q,fo),e(fo,si),e(Q,ai),e(Q,bo),e(bo,ri),e(Q,ii),e(D,li),e(D,Y),e(Y,xo),e(xo,di),e(Y,ci),e(Y,vn),e(vn,pi),e(Y,mi),e(Y,ko),e(ko,gi),e(Y,_i),e(Y,vo),e(vo,ui),e(Y,hi),e(E,fi),y(ot,E,null),e(E,bi),e(E,Mt),e(Mt,xi),e(Mt,wt),e(wt,ki),e(Mt,vi),e(E,yi),e(E,yo),e(yo,ji),e(E,Mi),y(st,E,null),e(E,wi),y(at,E,null),e(E,Li),y(rt,E,null),e(T,$i),e(T,be),y(Lt,be,null),e(be,Ti),e(be,$t),e($t,Ei),e($t,jo),e(jo,Oi),e($t,Gi),e(be,qi),y(it,be,null),e(T,Si),e(T,xe),y(Tt,xe,null),e(xe,Fi),e(xe,Et),e(Et,Ai),e(Et,Mo),e(Mo,zi),e(Et,Pi),e(xe,Di),y(lt,xe,null),e(T,Ni),e(T,ke),y(Ot,ke,null),e(ke,Ci),e(ke,Gt),e(Gt,Ii),e(Gt,wo),e(wo,Bi),e(Gt,Wi),e(ke,Hi)
,y(dt,ke,null),e(T,Ri),e(T,ve),y(qt,ve,null),e(ve,Ui),e(ve,St),e(St,Vi),e(St,Lo),e(Lo,Ki),e(St,Zi),e(ve,Xi),y(ct,ve,null),e(T,Ji),e(T,ye),y(Ft,ye,null),e(ye,Qi),e(ye,At),e(At,Yi),e(At,$o),e($o,el),e(At,tl),e(ye,nl),y(pt,ye,null),e(T,ol),e(T,je),y(zt,je,null),e(je,sl),e(je,Pt),e(Pt,al),e(Pt,To),e(To,rl),e(Pt,il),e(je,ll),y(mt,je,null),x(l,fs,b),x(l,Ge,b),e(Ge,gt),e(gt,Eo),y(Dt,Eo,null),e(Ge,dl),e(Ge,Oo),e(Oo,cl),x(l,bs,b),x(l,pe,b),y(Nt,pe,null),e(pe,pl),e(pe,Ct),e(Ct,ml),e(Ct,yn),e(yn,gl),e(Ct,_l),e(pe,ul),e(pe,F),y(It,F,null),e(F,hl),e(F,Go),e(Go,fl),e(F,bl),e(F,Bt),e(Bt,xl),e(Bt,Wt),e(Wt,kl),e(Bt,vl),e(F,yl),e(F,me),e(me,jl),e(me,qo),e(qo,Ml),e(me,wl),e(me,So),e(So,Ll),e(me,$l),e(me,jn),e(jn,Tl),e(me,El),e(F,Ol),e(F,Ht),e(Ht,Gl),e(Ht,Rt),e(Rt,ql),e(Ht,Sl),e(F,Fl),y(_t,F,null),x(l,xs,b),x(l,qe,b),e(qe,ut),e(ut,Fo),y(Ut,Fo,null),e(qe,Al),e(qe,Ao),e(Ao,zl),x(l,ks,b),x(l,N,b),y(Vt,N,null),e(N,Pl),e(N,Kt),e(Kt,Dl),e(Kt,Mn),e(Mn,Nl),e(Kt,Cl),e(N,Il),e(N,Zt),e(Zt,Bl),e(Zt,wn),e(wn,Wl),e(Zt,Hl),e(N,Rl),e(N,Se),e(Se,ee),e(ee,zo),e(zo,Ul),e(ee,Vl),e(ee,Po),e(Po,Kl),e(ee,Zl),e(ee,Do),e(Do,Xl),e(ee,Jl),e(ee,No),e(No,Ql),e(ee,Yl),e(Se,ed),e(Se,te),e(te,Co),e(Co,td),e(te,nd),e(te,Io),e(Io,od),e(te,sd),e(te,Bo),e(Bo,ad),e(te,rd),e(te,Wo),e(Wo,id),e(te,ld),e(Se,dd),e(Se,ne),e(ne,Ho),e(Ho,cd),e(ne,pd),e(ne,Ro),e(Ro,md),e(ne,gd),e(ne,Uo),e(Uo,_d),e(ne,ud),e(ne,Vo),e(Vo,hd),e(ne,fd),e(N,bd),e(N,A),y(Xt,A,null),e(A,xd),e(A,Ko),e(Ko,kd),e(A,vd),e(A,Fe),e(Fe,oe),e(oe,Zo),e(Zo,yd),e(oe,jd),e(oe,Xo),e(Xo,Md),e(oe,wd),e(oe,Jo),e(Jo,Ld),e(oe,$d),e(oe,Qo),e(Qo,Td),e(oe,Ed),e(Fe,Od),e(Fe,se),e(se,Yo),e(Yo,Gd),e(se,qd),e(se,es),e(es,Sd),e(se,Fd),e(se,ts),e(ts,Ad),e(se,zd),e(se,ns),e(ns,Pd),e(se,Dd),e(Fe,Nd),e(Fe,ae),e(ae,os),e(os,Cd),e(ae,Id),e(ae,ss),e(ss,Bd),e(ae,Wd),e(ae,as),e(as,Hd),e(ae,Rd),e(ae,rs),e(rs,Ud),e(ae,Vd),e(A,Kd),y(ht,A,null),e(A,Zd),e(A,Jt),e(Jt,Xd),e(Jt,Qt),e(Qt,Jd),e(Jt,Qd),e(A,Yd),y(ft,A,null),vs=!0},p(l,[b]){const Yt={};b&2&&(Yt.$$scope={dirty:b,ctx:l}),ot.$set(Yt);const is={};b&2&&(is.$$scope={dirty:b,ctx:l}),st.$set(is);const ls={};b&2&&(ls.$$scope={dirty:b,ctx:l}),at.$set(ls);const ds={};b&2&&(ds.$$scope={dirty:b,ctx:l}),rt.$set(ds);const en={};b&2&&(en.$$scope={dirty:b,ctx:l}),it.$set(en);const cs={};b&2&&(cs.$$scope={dirty:b,ctx:l}),lt.$set(cs);const Ae={};b&2&&(Ae.$$scope={dirty:b,ctx:l}),dt.$set(Ae);const ze={};b&2&&(ze.$$scope={dirty:b,ctx:l}),ct.$set(ze);const ps={};b&2&&(ps.$$scope={dirty:b,ctx:l}),pt.$set(ps);const ms={};b&2&&(ms.$$scope={dirty:b,ctx:l}),mt.$set(ms);const Pe={};b&2&&(Pe.$$scope={dirty:b,ctx:l}),_t.$set(Pe);const gs={};b&2&&(gs.$$scope={dirty:b,ctx:l}),ht.$set(gs);const 
_s={};b&2&&(_s.$$scope={dirty:b,ctx:l}),ft.$set(_s)},i(l){vs||(j(s.$$.fragment,l),j(xt.$$.fragment,l),j(kt.$$.fragment,l),j(jt.$$.fragment,l),j(ot.$$.fragment,l),j(st.$$.fragment,l),j(at.$$.fragment,l),j(rt.$$.fragment,l),j(Lt.$$.fragment,l),j(it.$$.fragment,l),j(Tt.$$.fragment,l),j(lt.$$.fragment,l),j(Ot.$$.fragment,l),j(dt.$$.fragment,l),j(qt.$$.fragment,l),j(ct.$$.fragment,l),j(Ft.$$.fragment,l),j(pt.$$.fragment,l),j(zt.$$.fragment,l),j(mt.$$.fragment,l),j(Dt.$$.fragment,l),j(Nt.$$.fragment,l),j(It.$$.fragment,l),j(_t.$$.fragment,l),j(Ut.$$.fragment,l),j(Vt.$$.fragment,l),j(Xt.$$.fragment,l),j(ht.$$.fragment,l),j(ft.$$.fragment,l),vs=!0)},o(l){M(s.$$.fragment,l),M(xt.$$.fragment,l),M(kt.$$.fragment,l),M(jt.$$.fragment,l),M(ot.$$.fragment,l),M(st.$$.fragment,l),M(at.$$.fragment,l),M(rt.$$.fragment,l),M(Lt.$$.fragment,l),M(it.$$.fragment,l),M(Tt.$$.fragment,l),M(lt.$$.fragment,l),M(Ot.$$.fragment,l),M(dt.$$.fragment,l),M(qt.$$.fragment,l),M(ct.$$.fragment,l),M(Ft.$$.fragment,l),M(pt.$$.fragment,l),M(zt.$$.fragment,l),M(mt.$$.fragment,l),M(Dt.$$.fragment,l),M(Nt.$$.fragment,l),M(It.$$.fragment,l),M(_t.$$.fragment,l),M(Ut.$$.fragment,l),M(Vt.$$.fragment,l),M(Xt.$$.fragment,l),M(ht.$$.fragment,l),M(ft.$$.fragment,l),vs=!1},d(l){o(d),l&&o(f),l&&o(_),w(s),l&&o(I),l&&o(S),l&&o(he),l&&o(fe),l&&o(us),l&&o(Oe),w(xt),l&&o(hs),l&&o(T),w(kt),w(jt),w(ot),w(st),w(at),w(rt),w(Lt),w(it),w(Tt),w(lt),w(Ot),w(dt),w(qt),w(ct),w(Ft),w(pt),w(zt),w(mt),l&&o(fs),l&&o(Ge),w(Dt),l&&o(bs),l&&o(pe),w(Nt),w(It),w(_t),l&&o(xs),l&&o(qe),w(Ut),l&&o(ks),l&&o(N),w(Vt),w(Xt),w(ht),w(ft)}}}const fm={local:"generation",sections:[{local:"transformers.generation_utils.GenerationMixin",title:"GenerationMixin"},{local:"transformers.generation_tf_utils.TFGenerationMixin",title:"TFGenerationMixin"},{local:"transformers.generation_flax_utils.FlaxGenerationMixin",title:"FlaxGenerationMixin"}],title:"Generation"};function bm(L){return nm(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class wm extends Qp{constructor(d){super();Yp(this,d,bm,hm,em,{})}}export{wm as default,fm as metadata};
12
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/model.mdx-hf-doc-builder.js
import{S as tx,i as ox,s as rx,e as r,k as d,w as u,t as s,M as ax,c as a,d as t,m as l,a as n,x as g,h as i,b as m,G as e,g as $,y as _,q as b,o as v,B as y,v as nx,L as Se}from"../../chunks/vendor-hf-doc-builder.js";import{T as Hn}from"../../chunks/Tip-hf-doc-builder.js";import{D as M}from"../../chunks/Docstring-hf-doc-builder.js";import{C as U}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Ne}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Be}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function sx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import AutoModel model = AutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". model.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel model = AutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function ix(D){let p,x;return{c(){p=r("p"),x=s("Passing `use_auth_token=True\u201C is required when you want to use a private model.")},l(w){p=a(w,"P",{});var f=n(p);x=i(f,"Passing `use_auth_token=True\u201C is required when you want to use a private model."),f.forEach(t)},m(w,f){$(w,p,f),e(p,x)},d(w){w&&t(p)}}}function dx(D){let p,x,w,f,k;return{c(){p=r("p"),x=s("Activate the special "),w=r("a"),f=s("\u201Coffline-mode\u201D"),k=s(` to use this method in a firewalled environment.`),this.h()},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Activate the special "),w=a(T,"A",{href:!0,rel:!0});var ae=n(w);f=i(ae,"\u201Coffline-mode\u201D"),ae.forEach(t),k=i(T,` to use this method in a firewalled environment.`),T.forEach(t),this.h()},h(){m(w,"href","https://huggingface.co/transformers/installation.html#offline-mode"),m(w,"rel","nofollow")},m(c,T){$(c,p,T),e(p,x),e(p,w),e(w,f),e(p,k)},d(c){c&&t(p)}}}function lx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import BertConfig, BertModel # Download model and configuration from huggingface.co and cache. model = BertModel.from_pretrained("bert-base-uncased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = BertModel.from_pretrained("./test/saved_model/") # Update configuration during loading. model = BertModel.from_pretrained("bert-base-uncased", output_attentions=True) assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json") model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config) # Loading from a Flax checkpoint file instead of a PyTorch model (slower) model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, BertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./tf_model/my_tf_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./tf_model/my_tf_checkpoint.ckpt.index&quot;</span>, from_tf=<span class="hljs-literal">True</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Flax checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, from_flax=<span class="hljs-literal">True</span>)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function cx(D){let p,x;return{c(){p=r("p"),x=s("This API is experimental and may have some slight breaking changes in the next releases.")},l(w){p=a(w,"P",{});var f=n(p);x=i(f,"This API is experimental and may have some slight breaking changes in the next releases."),f.forEach(t)},m(w,f){$(w,p,f),e(p,x)},d(w){w&&t(p)}}}function mx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import TFAutoModel model = TFAutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". 
model.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel model = TFAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function px(D){let p,x,w,f,k;return{c(){p=r("p"),x=s("Passing "),w=r("code"),f=s("use_auth_token=True"),k=s(" is required when you want to use a private model.")},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Passing "),w=a(T,"CODE",{});var ae=n(w);f=i(ae,"use_auth_token=True"),ae.forEach(t),k=i(T," is required when you want to use a private model."),T.forEach(t)},m(c,T){$(c,p,T),e(p,x),e(p,w),e(w,f),e(p,k)},d(c){c&&t(p)}}}function hx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import BertConfig, TFBertModel # Download model and configuration from huggingface.co and cache. model = TFBertModel.from_pretrained("bert-base-uncased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = TFBertModel.from_pretrained("./test/saved_model/") # Update configuration during loading. model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True) assert model.config.output_attentions == True # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, TFBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/my_pt_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/my_pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function fx(D){let p,x;return{c(){p=r("p"),x=s("This API is experimental and may have some slight breaking changes in the next releases.")},l(w){p=a(w,"P",{});var f=n(p);x=i(f,"This API is experimental and may have some slight breaking changes in the next releases."),f.forEach(t)},m(w,f){$(w,p,f),e(p,x)},d(w){w&&t(p)}}}function ux(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import FlaxAutoModel model = FlaxAutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". 
model.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxAutoModel model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function gx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import BertConfig, FlaxBertModel # Download model and configuration from huggingface.co and cache. model = FlaxBertModel.from_pretrained("bert-base-cased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = FlaxBertModel.from_pretrained("./test/saved_model/") # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). config = BertConfig.from_json_file("./pt_model/config.json") model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function _x(D){let p,x;return{c(){p=r("p"),x=s("This API is experimental and may have some slight breaking changes in the next releases.")},l(w){p=a(w,"P",{});var f=n(p);x=i(f,"This API is experimental and may have some slight breaking changes in the 
next releases."),f.forEach(t)},m(w,f){$(w,p,f),e(p,x)},d(w){w&&t(p)}}}function bx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import FlaxBertModel # load model model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision model.params = model.to_bf16(model.params) # If you want don't want to cast certain parameters (for example layer norm bias and scale) # then pass the mask as follows from flax import traverse_util model = FlaxBertModel.from_pretrained("bert-base-cased") flat_params = traverse_util.flatten_dict(model.params) mask = { path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) for path in flat_params } mask = traverse_util.unflatten_dict(mask) model.params = model.to_bf16(model.params, mask)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params, mask)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function vx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import FlaxBertModel # load model model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model params will be in fp32, to cast these to float16 model.params = model.to_fp16(model.params) # If you want don't want to cast certain parameters (for example layer norm bias and scale) # then pass the mask as follows from flax import traverse_util model = FlaxBertModel.from_pretrained("bert-base-cased") flat_params = traverse_util.flatten_dict(model.params) mask = { path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) for path in flat_params } mask = traverse_util.unflatten_dict(mask) model.params = model.to_fp16(model.params, mask)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to cast these to float16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params, mask)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function yx(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import FlaxBertModel # Download model and configuration from huggingface.co model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model params will be in fp32, to illustrate the use of this method, # we'll first cast to fp16 and back to fp32 model.params = model.to_f16(model.params) # now cast back to fp32 model.params = model.to_fp32(model.params)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to illustrate the use of this method,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># we&#x27;ll first cast to fp16 and back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_f16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># now cast back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp32(model.params)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function $x(D){let p,x,w,f,k;return f=new U({props:{code:`from transformers import {object_class} {object} = {object_class}.from_pretrained("bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert". {object}.push_to_hub("my-finetuned-bert") # Push the {object} to an organization with the name "my-finetuned-bert". 
{object}.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> {object_class} {<span class="hljs-built_in">object</span>} = {object_class}.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot;.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the {object} to an organization with the name &quot;my-finetuned-bert&quot;.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){p=r("p"),x=s("Examples:"),w=d(),u(f.$$.fragment)},l(c){p=a(c,"P",{});var T=n(p);x=i(T,"Examples:"),T.forEach(t),w=l(c),g(f.$$.fragment,c)},m(c,T){$(c,p,T),e(p,x),$(c,w,T),_(f,c,T),k=!0},p:Se,i(c){k||(b(f.$$.fragment,c),k=!0)},o(c){v(f.$$.fragment,c),k=!1},d(c){c&&t(p),c&&t(w),y(f,c)}}}function wx(D){let p,x,w,f,k,c,T,ae,Bm,jl,Z,Sm,Wa,Wm,Xm,Xa,Vm,Gm,Va,Rm,Hm,Dl,We,Ga,Ym,Jm,Ra,Km,Zm,ql,st,Yn,Qm,ep,Jn,tp,zl,O,op,Ha,rp,ap,Kn,np,sp,Ya,ip,dp,Ja,lp,cp,Ka,mp,pp,Al,Xe,it,Zn,qo,hp,Qn,fp,Cl,F,zo,up,es,gp,_p,Za,Qa,bp,vp,yp,Ao,ts,$p,wp,os,Tp,xp,rs,kp,Pp,G,as,pe,ns,Mp,Ep,en,Fp,jp,tn,Dp,qp,zp,Co,he,ss,Ap,Cp,is,Ip,Lp,ds,Up,Op,Np,Ve,dt,ls,Bp,Sp,on,Wp,Xp,Vp,lt,cs,Gp,Rp,ms,Hp,Yp,Jp,ct,ps,Kp,Zp,hs,Qp,eh,th,fs,mt,us,oh,rh,gs,ah,nh,sh,_s,pt,bs,ih,dh,vs,lh,ch,mh,ys,S,$s,ph,hh,ws,fh,uh,Ts,gh,_h,xs,bh,vh,ks,yh,$h,wh,fe,Io,Th,Lo,xh,Ps,kh,Ph,Mh,ht,Eh,z,Uo,Fh,Ms,jh,Dh,Ge,qh,Es,zh,Ah,Fs,Ch,Ih,Lh,Oo,Uh,js,Oh,Nh,Bh,No,Sh,Ds,Wh,Xh,Vh,ft,Gh,ut,Rh,gt,Hh,qs,rn,zs,Yh,Jh,Kh,As,Zh,Qh,Cs,ef,tf,R,Is,of,rf,Ls,af,nf,Us,sf,df,Os,lf,cf,Ns,mf,pf,Bs,hf,ff,_t,Bo,uf,Ss,gf,_f,bt,So,bf,an,vf,Wo,yf,$f,vt,Xo,wf,Ws,Tf,xf,ue,Vo,kf,Xs,Pf,Mf,Vs,Ef,Ff,ge,Go,jf,Gs,Df,qf,Rs,zf,Af,yt,Ro,Cf,Hs,If,Lf,$t,Ho,Uf,Ys,Of,Nf,wt,Yo,Bf,Js,Sf,Wf,_e,Jo,Xf,Ks,Vf,Gf,Tt,Rf,be,Ko,Hf,Zo,Yf,Zs,Jf,Kf,Zf,Qo,Qf,Qs,eu,tu,ou,xt,er,ru,tr,au,nn,nu,su,iu,kt,or,du,ei,lu,cu,ve,rr,mu,ti,pu,hu,ar,fu,oi,uu,gu,Il,sn,Ll,Re,Pt,ri,nr,_u,ai,bu,Ul,ye,vu,dn,yu,$u,sr,wu,Tu,Ol,Mt,xu,ni,ku,Pu,Nl,ir,Bl,Et,Mu,si,Eu,Fu,Sl,Q,ju,ii,Du,qu,di,zu,Au,li,Cu,Iu,Wl,dr,Xl,Ft,Lu,ci,Uu,Ou,Vl,lr,Gl,cr,Rl,ln,Nu,Hl,mr,Yl,jt,Bu,mi,Su,Wu,Jl,He,Dt,pi,pr,Xu,hi,Vu,Kl,ee,Gu,fi,Ru,Hu,ui,Yu,Ju,gi,Ku,Zu,Zl,hr,Ql,$e,Qu,_i,eg,tg,bi,og,rg,ec,fr,tc,qt,ag,vi,ng,sg,oc,ur,rc,cn,ig,ac,Ye,zt,yi,gr,dg,$i,lg,nc,I,_r,cg,br,mg,wi,pg,hg,fg,we,vr,ug,Ti,gg,_g,Je,bg,xi,vg,yg,ki,$g,wg,Tg,At,yr,xg,Pi,kg,Pg,Ct,$r,Mg,Ke,Eg,Mi,Fg,jg,wr,Dg,qg,zg,It,Tr,Ag,Ei,Cg,Ig,Lt,xr,Lg,Fi,Ug,Og,Ut,kr,Ng,ji,Bg,Sg,Ot,Pr,Wg,Di,Xg,Vg,Nt,Mr,Gg,Ze,Rg,qi,Hg,Yg,mn,Jg,Kg,sc,Qe,Bt,zi,Er,Zg,Ai,Qg,ic,P,Fr,e_,Ci,t_,o_,pn,hn,r_,a_,n_,jr,Ii,s_,i_,Li,d_,l_,Ui,c_,m_,et,Te,Oi,p_,h_,fn,f_,u_,un,g_,__,b_,St,Ni,v_,y_,Bi,$_,w_,T_,W,Si,x_,k_,Wi,P_,M_,Xi,E_,F_,Vi,j_,D_,Gi,q_,z_,A_,xe,Dr,C_,qr,I_,Ri,L_,U_,O_,Wt,N_,Xt,zr,B_,Hi,S_,W_,Vt,Ar,X_,Cr,V_,Yi,G_,R_,H_,N,Ir,Y_,Ji,J_,K_,Lr,Z_,Ki,Q_,eb,tb,Ur,ob,Zi,rb,ab,nb,Gt,sb,Rt,ib,Ht,Or,db,Qi,lb,cb,Yt,Nr,mb,ed,pb,hb,Jt,Br,fb,td,ub,gb,Kt,Sr,_b,od,bb,vb,Zt,Wr,yb,rd,$b,wb,Qt,Xr,Tb,ad,xb,kb,eo,Vr,Pb,nd,Mb,Eb,to,Gr,Fb,H,jb,Rr,Db,qb,sd,zb,Ab,id,Cb,Ib,dd,Lb,Ub,Ob,oo,Hr,Nb,ld,Bb,Sb,ke,Yr,Wb,cd,Xb,Vb,ro,Gb,Pe,Jr,Rb,Kr,Hb,md,Yb,Jb,Kb,Zr,Zb,pd,Qb,ev,tv,ao,Qr,ov,ea,rv,gn,av,nv,sv,no,ta,iv,hd,dv,lv,so,oa,cv,fd,mv,pv,io,ra,hv,ud,fv,uv,lo,aa,gv,gd,_v,bv,co,na,vv,_d,yv,$v,mo,sa,wv,ia,Tv,bd,xv,kv,Pv,po,da,Mv,la,Ev,vd,Fv,jv,dc,tt,ho,yd,ca,Dv,$d,qv,lc,ne,ma,zv
,pa,Av,wd,Cv,Iv,Lv,fo,ha,Uv,Td,Ov,cc,ot,uo,xd,fa,Nv,kd,Bv,mc,q,ua,Sv,Pd,Wv,Xv,_n,bn,Vv,Gv,Rv,Md,Hv,Yv,rt,Me,Ed,Jv,Kv,vn,Zv,Qv,yn,ey,ty,oy,go,Fd,ry,ay,jd,ny,sy,iy,X,Dd,dy,ly,qd,cy,my,zd,py,hy,Ad,fy,uy,Cd,gy,_y,by,Ee,ga,vy,_a,yy,Id,$y,wy,Ty,_o,xy,V,ba,ky,Ld,Py,My,va,Ey,Ud,Fy,jy,Dy,ya,qy,Od,zy,Ay,Cy,bo,Iy,Fe,$a,Ly,wa,Uy,Nd,Oy,Ny,By,Bd,Sy,Wy,je,Ta,Xy,Sd,Vy,Gy,vo,Ry,yo,xa,Hy,ka,Yy,Wd,Jy,Ky,Zy,te,Pa,Qy,Y,e1,Xd,t1,o1,Vd,r1,a1,Gd,n1,s1,Rd,i1,d1,l1,Hd,c1,m1,$o,p1,oe,Ma,h1,J,f1,Yd,u1,g1,Jd,_1,b1,Kd,v1,y1,Zd,$1,w1,T1,Qd,x1,k1,wo,P1,De,Ea,M1,K,E1,el,F1,j1,tl,D1,q1,ol,z1,A1,rl,C1,I1,L1,To,pc,at,xo,al,Fa,U1,nl,O1,hc,se,ja,N1,sl,B1,S1,qe,Da,W1,qa,X1,il,V1,G1,R1,ko,fc,nt,Po,dl,za,H1,ll,Y1,uc,ie,Aa,J1,Ca,K1,Ia,cl,Z1,Q1,e2,ml,t2,gc;return c=new Ne({}),qo=new Ne({}),zo=new M({props:{name:"class transformers.PreTrainedModel",anchor:"transformers.PreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L906"}}),Io=new M({props:{name:"push_to_hub",anchor:"transformers.PreTrainedModel.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedModel.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.PreTrainedModel.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.PreTrainedModel.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.PreTrainedModel.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.PreTrainedModel.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.PreTrainedModel.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. 
The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.PreTrainedModel.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),ht=new Be({props:{anchor:"transformers.PreTrainedModel.push_to_hub.example",$$slots:{default:[sx]},$$scope:{ctx:D}}}),Uo=new M({props:{name:"from_pretrained",anchor:"transformers.PreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType]"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> <li>A path or url to a model folder containing a <em>flax checkpoint file</em> in <em>.msgpack</em> format (e.g, <code>./flax_model/</code> containing <code>flax_model.msgpack</code>). In this case, <code>from_flax</code> should be set to <code>True</code>.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.PreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a> and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <code>pretrained_model_name_or_path</code> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.PreTrainedModel.from_pretrained.state_dict",description:`<strong>state_dict</strong> (<code>Dict[str, torch.Tensor]</code>, <em>optional</em>) &#x2014; A state dictionary to use instead of a state dictionary loaded from saved weights file.</p> <p>This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> is not a simpler option.`,name:"state_dict"},{anchor:"transformers.PreTrainedModel.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.PreTrainedModel.from_pretrained.from_tf",description:`<strong>from_tf</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Load the model weights from a TensorFlow checkpoint save file (see docstring of <code>pretrained_model_name_or_path</code> argument).`,name:"from_tf"},{anchor:"transformers.PreTrainedModel.from_pretrained.from_flax",description:`<strong>from_flax</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Load the model weights from a Flax checkpoint save file (see docstring of <code>pretrained_model_name_or_path</code> argument).`,name:"from_flax"},{anchor:"transformers.PreTrainedModel.from_pretrained.ignore_mismatched_sizes",description:`<strong>ignore_mismatched_sizes</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).`,name:"ignore_mismatched_sizes"},{anchor:"transformers.PreTrainedModel.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.PreTrainedModel.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.PreTrainedModel.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. 
The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.PreTrainedModel.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"transformers.PreTrainedModel.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (i.e., do not try to download the model).`,name:"local_files_only(bool,"},{anchor:"transformers.PreTrainedModel.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.PreTrainedModel.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.PreTrainedModel.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014; Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information.`,name:"mirror"},{anchor:"transformers.PreTrainedModel.from_pretrained._fast_init(bool,",description:`<strong>_fast_init(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to disable fast initialization.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>One should only disable <em>_fast_init</em> to ensure backwards compatibility with <code>transformers.__version__ &lt; 4.6.0</code> for seeded model initialization. This argument will be removed at the next major version. See <a href="https://github.com/huggingface/transformers/pull/11471" rel="nofollow">pull request 11471</a> for more information.</p> </div>`,name:"_fast_init(bool,"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1661",parameterGroups:[{title:"Parameters for big model inference",parametersDescription:[{anchor:"transformers.PreTrainedModel.from_pretrained.low_cpu_mem_usage(bool,",description:`<strong>low_cpu_mem_usage(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. 
This is an experimental feature and a subject to change at any moment.`,name:"low_cpu_mem_usage(bool,"},{anchor:"transformers.PreTrainedModel.from_pretrained.torch_dtype",description:`<strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014; Override the default <code>torch.dtype</code> and load the model under this dtype. If <code>&quot;auto&quot;</code> is passed the dtype will be automatically derived from the model&#x2019;s weights.`,name:"torch_dtype"},{anchor:"transformers.PreTrainedModel.from_pretrained.device_map",description:`<strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]]</code>, <em>optional</em>) &#x2014; A map that specifies where each submodule should go. It doesn&#x2019;t need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device.</p> <p>To have Accelerate compute the most optimized <code>device_map</code> automatically, set <code>device_map=&quot;auto&quot;</code>. For more information about each option see <a href="https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map" rel="nofollow">designing a device map</a>.`,name:"device_map"},{anchor:"transformers.PreTrainedModel.from_pretrained.max_memory",description:`<strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset.`,name:"max_memory"},{anchor:"transformers.PreTrainedModel.from_pretrained.offload_folder",description:`<strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; If the <code>device_map</code> contains any value <code>&quot;disk&quot;</code>, the folder where we will offload weights.`,name:"offload_folder"},{anchor:"transformers.PreTrainedModel.from_pretrained.offload_state_dict",description:`<strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to <code>True</code> when there is some disk offload.`,name:"offload_state_dict"},{anchor:"transformers.PreTrainedModel.from_pretrained.load_in_8bit",description:`<strong>load_in_8bit</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, will convert the loaded model into mixed-8bit quantized model. To use this feature please install <code>bitsandbytes</code> compiled with your CUDA version by running <code>pip install -i https://test.pypi.org/simple/ bitsandbytes-cudaXXX</code> where XXX is your CUDA version (e.g. 11.6 = 116). Make also sure that you have enough GPU RAM to store half of the model size since the 8bit modules are not compiled and adapted for CPUs.`,name:"load_in_8bit"},{anchor:"transformers.PreTrainedModel.from_pretrained.load_in_8bit_threshold",description:`<strong>load_in_8bit_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 6) &#x2014; Works together with <code>load_in_8bit</code>. This corresponds to the outlier threshold for outlier detection as described in <code>GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale</code> paper. 
Any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning).`,name:"load_in_8bit_threshold"},{anchor:"transformers.PreTrainedModel.from_pretrained.load_in_8bit_skip_modules",description:`<strong>load_in_8bit_skip_modules</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as Jukebox that has several heads in different places and not necessarily at the last position.`,name:"load_in_8bit_skip_modules"},{anchor:"transformers.PreTrainedModel.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.`,name:"subfolder"},{anchor:"transformers.PreTrainedModel.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>). Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>). Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul>`,name:"kwargs"}]}]}}),ft=new Hn({props:{$$slots:{default:[ix]},$$scope:{ctx:D}}}),ut=new Hn({props:{$$slots:{default:[dx]},$$scope:{ctx:D}}}),gt=new Be({props:{anchor:"transformers.PreTrainedModel.from_pretrained.example",$$slots:{default:[lx]},$$scope:{ctx:D}}}),Bo=new M({props:{name:"get_input_embeddings",anchor:"transformers.PreTrainedModel.get_input_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1061",returnDescription:` <p>A torch module mapping vocabulary to hidden states.</p> `,returnType:` <p><code>nn.Module</code></p> `}}),So=new M({props:{name:"get_memory_footprint",anchor:"transformers.PreTrainedModel.get_memory_footprint",parameters:[{name:"return_buffers",val:" = True"}],parametersDescription:[{anchor:"transformers.PreTrainedModel.get_memory_footprint.return_buffers",description:`<strong>return_buffers</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch norm layers. Please see: <a href="https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2" rel="nofollow">https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2</a>`,name:"return_buffers"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1643"}}),Xo=new M({props:{name:"get_output_embeddings",anchor:"transformers.PreTrainedModel.get_output_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1087",returnDescription:` <p>A torch module mapping hidden states to vocabulary.</p> `,returnType:` <p><code>nn.Module</code></p> `}}),Vo=new M({props:{name:"gradient_checkpointing_disable",anchor:"transformers.PreTrainedModel.gradient_checkpointing_disable",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1471"}}),Go=new M({props:{name:"gradient_checkpointing_enable",anchor:"transformers.PreTrainedModel.gradient_checkpointing_enable",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1460"}}),Ro=new M({props:{name:"init_weights",anchor:"transformers.PreTrainedModel.init_weights",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1427"}}),Ho=new M({props:{name:"post_init",anchor:"transformers.PreTrainedModel.post_init",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L980"}}),Yo=new M({props:{name:"prune_heads",anchor:"transformers.PreTrainedModel.prune_heads",parameters:[{name:"heads_to_prune",val:": typing.Dict[int, typing.List[int]]"}],parametersDescription:[{anchor:"transformers.PreTrainedModel.prune_heads.heads_to_prune",description:`<strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). 
For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"heads_to_prune"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1443"}}),Jo=new M({props:{name:"register_for_auto_class",anchor:"transformers.PreTrainedModel.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoModel'"}],parametersDescription:[{anchor:"transformers.PreTrainedModel.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2680"}}),Tt=new Hn({props:{warning:!0,$$slots:{default:[cx]},$$scope:{ctx:D}}}),Ko=new M({props:{name:"resize_token_embeddings",anchor:"transformers.PreTrainedModel.resize_token_embeddings",parameters:[{name:"new_num_tokens",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.PreTrainedModel.resize_token_embeddings.new_num_tokens",description:`<strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or <code>None</code>, just returns a pointer to the input tokens <code>torch.nn.Embedding</code> module of the model without doing anything.`,name:"new_num_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1218",returnDescription:` <p>Pointer to the input tokens Embeddings Module of the model.</p> `,returnType:` <p><code>torch.nn.Embedding</code></p> `}}),er=new M({props:{name:"save_pretrained",anchor:"transformers.PreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"is_main_process",val:": bool = True"},{name:"state_dict",val:": typing.Optional[dict] = None"},{name:"save_function",val:": typing.Callable = <function save at 0x7f045fb23ca0>"},{name:"push_to_hub",val:": bool = False"},{name:"max_shard_size",val:": typing.Union[int, str] = '10GB'"},{name:"safe_serialization",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.PreTrainedModel.save_pretrained.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main process to avoid race conditions.`,name:"is_main_process"},{anchor:"transformers.PreTrainedModel.save_pretrained.state_dict",description:`<strong>state_dict</strong> (nested dictionary of <code>torch.Tensor</code>) &#x2014; The state dictionary of the model to save. 
Will default to <code>self.state_dict()</code>, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism).`,name:"state_dict"},{anchor:"transformers.PreTrainedModel.save_pretrained.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) &#x2014; The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace <code>torch.save</code> by another method.`,name:"save_function"},{anchor:"transformers.PreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace).`,name:"push_to_hub"},{anchor:"transformers.PreTrainedModel.save_pretrained.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If a single weight of the model is bigger than <code>max_shard_size</code>, it will be in its own checkpoint shard which will be bigger than <code>max_shard_size</code>.</p> </div>`,name:"max_shard_size"},{anchor:"transformers.PreTrainedModel.save_pretrained.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to save the model using <code>safetensors</code> or the traditional PyTorch way (that uses <code>pickle</code>).</p> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"safe_serialization"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1491"}}),or=new M({props:{name:"set_input_embeddings",anchor:"transformers.PreTrainedModel.set_input_embeddings",parameters:[{name:"value",val:": Module"}],parametersDescription:[{anchor:"transformers.PreTrainedModel.set_input_embeddings.value",description:"<strong>value</strong> (<code>nn.Module</code>) &#x2014; A module mapping vocabulary to hidden states.",name:"value"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1074"}}),rr=new M({props:{name:"tie_weights",anchor:"transformers.PreTrainedModel.tie_weights",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1102"}}),nr=new Ne({}),ir=new U({props:{code:`from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", low_cpu_mem_usage=True)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0pp&quot;</span>, low_cpu_mem_usage=<span class="hljs-literal">True</span>)`}}),dr=new U({props:{code:`from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0pp&quot;</span>, device_map=<span class="hljs-string">&quot;auto&quot;</span>)`}}),lr=new U({props:{code:"t0pp.hf_device_map",highlighted:"t0pp.hf_device_map"}}),cr=new U({props:{code:`{'shared': 0, 'decoder.embed_tokens': 0, 'encoder': 0, 'decoder.block.0': 0, 'decoder.block.1': 1, 'decoder.block.2': 1, 'decoder.block.3': 1, 'decoder.block.4': 1, 'decoder.block.5': 1, 'decoder.block.6': 1, 'decoder.block.7': 1, 'decoder.block.8': 1, 'decoder.block.9': 1, 'decoder.block.10': 1, 'decoder.block.11': 1, 'decoder.block.12': 1, 'decoder.block.13': 1, 'decoder.block.14': 1, 'decoder.block.15': 1, 'decoder.block.16': 1, 'decoder.block.17': 1, 'decoder.block.18': 1, 'decoder.block.19': 1, 'decoder.block.20': 1, 'decoder.block.21': 1, 'decoder.block.22': 'cpu', 'decoder.block.23': 'cpu', 'decoder.final_layer_norm': 'cpu', 'decoder.dropout': 'cpu', 'lm_head': 'cpu'}`,highlighted:`{<span class="hljs-string">&#x27;shared&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.embed_tokens&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;encoder&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.block.0&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.block.1&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.2&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.3&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.4&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.5&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.6&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.7&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.8&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.9&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.10&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.11&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.12&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.13&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.14&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.15&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.16&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.17&#x27;</span>: <span class="hljs-number">1</span>, <span 
class="hljs-string">&#x27;decoder.block.18&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.19&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.20&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.21&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.22&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.block.23&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.final_layer_norm&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.dropout&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;lm_head&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>}`}}),mr=new U({props:{code:'device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1}',highlighted:'device_map = {<span class="hljs-string">&quot;shared&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;encoder&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;decoder&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;lm_head&quot;</span>: <span class="hljs-number">1</span>}'}}),pr=new Ne({}),hr=new U({props:{code:'model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16)',highlighted:'model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=torch.float16)'}}),fr=new U({props:{code:'model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto")',highlighted:'model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=<span class="hljs-string">&quot;auto&quot;</span>)'}}),ur=new U({props:{code:`config = T5Config.from_pretrained("t5") model = AutoModel.from_config(config)`,highlighted:`config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>) model = AutoModel.from_config(config)`}}),gr=new Ne({}),_r=new M({props:{name:"class transformers.modeling_utils.ModuleUtilsMixin",anchor:"transformers.modeling_utils.ModuleUtilsMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L623"}}),vr=new M({props:{name:"add_memory_hooks",anchor:"transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L654"}}),yr=new M({props:{name:"estimate_tokens",anchor:"transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens",parameters:[{name:"input_dict",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs",description:"<strong>inputs</strong> (<code>dict</code>) &#x2014; The model inputs.",name:"inputs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L858",returnDescription:` <p>The total number of tokens.</p> `,returnType:` <p><code>int</code></p> `}}),$r=new M({props:{name:"floating_point_ops",anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops",parameters:[{name:"input_dict",val:": typing.Dict[str, typing.Union[torch.Tensor, 
typing.Any]]"},{name:"exclude_embeddings",val:": bool = True"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size",description:`<strong>batch_size</strong> (<code>int</code>) &#x2014; The batch size for the forward pass.`,name:"batch_size"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length",description:`<strong>sequence_length</strong> (<code>int</code>) &#x2014; The number of tokens in each line of the batch.`,name:"sequence_length"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings",description:`<strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to count embedding and softmax operations.`,name:"exclude_embeddings"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L879",returnDescription:` <p>The number of floating-point operations.</p> `,returnType:` <p><code>int</code></p> `}}),Tr=new M({props:{name:"get_extended_attention_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask",parameters:[{name:"attention_mask",val:": Tensor"},{name:"input_shape",val:": typing.Tuple[int]"},{name:"device",val:": <property object at 0x7f04003b2810> = None"},{name:"dtype",val:": torch.float32 = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; Mask with ones indicating tokens to attend to, zeros for tokens to ignore.`,name:"attention_mask"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.input_shape",description:`<strong>input_shape</strong> (<code>Tuple[int]</code>) &#x2014; The shape of the input to the model.`,name:"input_shape"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L742",returnDescription:` <p><code>torch.Tensor</code> The extended attention mask, with a the same dtype as <code>attention_mask.dtype</code>.</p> `}}),xr=new M({props:{name:"get_head_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask",parameters:[{name:"head_mask",val:": typing.Optional[torch.Tensor]"},{name:"num_hidden_layers",val:": int"},{name:"is_attention_chunked",val:": bool = False"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> with shape <code>[num_heads]</code> or <code>[num_hidden_layers x num_heads]</code>, <em>optional</em>) &#x2014; The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).`,name:"head_mask"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>) &#x2014; The number of hidden layers in the model. 
is_attention_chunked &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not the attentions scores are computed by chunks or not.`,name:"num_hidden_layers"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L794",returnDescription:` <p><code>torch.Tensor</code> with shape <code>[num_hidden_layers x batch x num_heads x seq_length x seq_length]</code> or list with <code>[None]</code> for each layer.</p> `}}),kr=new M({props:{name:"invert_attention_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask",parameters:[{name:"encoder_attention_mask",val:": Tensor"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask.encoder_attention_mask",description:"<strong>encoder_attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; An attention mask.",name:"encoder_attention_mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L690",returnDescription:` <p>The inverted attention mask.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),Pr=new M({props:{name:"num_parameters",anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters",parameters:[{name:"only_trainable",val:": bool = False"},{name:"exclude_embeddings",val:": bool = False"}],parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters.only_trainable",description:`<strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters`,name:"only_trainable"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters.exclude_embeddings",description:`<strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of non-embeddings parameters`,name:"exclude_embeddings"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L832",returnDescription:` <p>The number of parameters.</p> `,returnType:` <p><code>int</code></p> `}}),Mr=new M({props:{name:"reset_memory_hooks_state",anchor:"transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L666"}}),Er=new Ne({}),Fr=new M({props:{name:"class transformers.TFPreTrainedModel",anchor:"transformers.TFPreTrainedModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L988"}}),Dr=new M({props:{name:"push_to_hub",anchor:"transformers.TFPreTrainedModel.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"**model_card_kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your model to. 
It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.TFPreTrainedModel.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.TFPreTrainedModel.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.TFPreTrainedModel.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.TFPreTrainedModel.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.TFPreTrainedModel.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>). 
model_card_kwargs &#x2014; Additional keyword arguments passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.create_model_card">create_model_card()</a> method.`,name:"max_shard_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2642"}}),Wt=new Be({props:{anchor:"transformers.TFPreTrainedModel.push_to_hub.example",$$slots:{default:[mx]},$$scope:{ctx:D}}}),zr=new M({props:{name:"compile",anchor:"transformers.TFPreTrainedModel.compile",parameters:[{name:"optimizer",val:" = 'rmsprop'"},{name:"loss",val:" = 'passthrough'"},{name:"metrics",val:" = None"},{name:"loss_weights",val:" = None"},{name:"weighted_metrics",val:" = None"},{name:"run_eagerly",val:" = None"},{name:"steps_per_execution",val:" = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1282"}}),Ar=new M({props:{name:"create_model_card",anchor:"transformers.TFPreTrainedModel.create_model_card",parameters:[{name:"output_dir",val:""},{name:"model_name",val:": str"},{name:"language",val:": typing.Optional[str] = None"},{name:"license",val:": typing.Optional[str] = None"},{name:"tags",val:": typing.Optional[str] = None"},{name:"finetuned_from",val:": typing.Optional[str] = None"},{name:"tasks",val:": typing.Optional[str] = None"},{name:"dataset_tags",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"dataset",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"dataset_args",val:": typing.Union[str, typing.List[str], NoneType] = None"}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.create_model_card.output_dir",description:`<strong>output_dir</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The folder in which to create the model card.`,name:"output_dir"},{anchor:"transformers.TFPreTrainedModel.create_model_card.model_name",description:`<strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model.`,name:"model_name"},{anchor:"transformers.TFPreTrainedModel.create_model_card.language",description:`<strong>language</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the model (if applicable)`,name:"language"},{anchor:"transformers.TFPreTrainedModel.create_model_card.license",description:`<strong>license</strong> (<code>str</code>, <em>optional</em>) &#x2014; The license of the model. Will default to the license of the pretrained model used, if the original model given to the <code>Trainer</code> comes from a repo on the Hub.`,name:"license"},{anchor:"transformers.TFPreTrainedModel.create_model_card.tags",description:`<strong>tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Some tags to be included in the metadata of the model card.`,name:"tags"},{anchor:"transformers.TFPreTrainedModel.create_model_card.finetuned_from",description:`<strong>finetuned_from</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model used to fine-tune this one (if applicable). 
Will default to the name of the repo of the original model given to the <code>Trainer</code> (if it comes from the Hub).`,name:"finetuned_from"},{anchor:"transformers.TFPreTrainedModel.create_model_card.tasks",description:`<strong>tasks</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several task identifiers, to be included in the metadata of the model card.`,name:"tasks"},{anchor:"transformers.TFPreTrainedModel.create_model_card.dataset_tags",description:`<strong>dataset_tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset tags, to be included in the metadata of the model card.`,name:"dataset_tags"},{anchor:"transformers.TFPreTrainedModel.create_model_card.dataset",description:`<strong>dataset</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset identifiers, to be included in the metadata of the model card.`,name:"dataset"},{anchor:"transformers.TFPreTrainedModel.create_model_card.dataset_args",description:`<strong>dataset_args</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset arguments, to be included in the metadata of the model card.`,name:"dataset_args"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1573"}}),Ir=new M({props:{name:"from_pretrained",anchor:"transformers.TFPreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"*model_args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <code>./pt_model/pytorch_model.bin</code>). In this case, <code>from_pt</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string valid as input to <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a> and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <code>pretrained_model_name_or_path</code> and a configuration JSON file named <em>config.json</em> is found in the directory. from_pt &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Load the model weights from a PyTorch state_dict save file (see docstring of <code>pretrained_model_name_or_path</code> argument).</li> </ul>`,name:"config"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.ignore_mismatched_sizes",description:`<strong>ignore_mismatched_sizes</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).`,name:"ignore_mismatched_sizes"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
proxies &#x2014; (<code>Dict[str, str]</code>, <em>optional</em>): A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</code>. The proxies are used on each request. output_loading_info (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"resume_download"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).`,name:"local_files_only(bool,"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014; Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information.`,name:"mirror"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.`,name:"subfolder"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>). Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>).
Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul>`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2215"}}),Gt=new Hn({props:{$$slots:{default:[px]},$$scope:{ctx:D}}}),Rt=new Be({props:{anchor:"transformers.TFPreTrainedModel.from_pretrained.example",$$slots:{default:[hx]},$$scope:{ctx:D}}}),Or=new M({props:{name:"get_bias",anchor:"transformers.TFPreTrainedModel.get_bias",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1713",returnDescription:` <p>The weights representing the bias, None if not an LM model.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),Nr=new M({props:{name:"get_input_embeddings",anchor:"transformers.TFPreTrainedModel.get_input_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1102",returnDescription:` <p>The embeddings layer mapping vocabulary to hidden states.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),Br=new M({props:{name:"get_lm_head",anchor:"transformers.TFPreTrainedModel.get_lm_head",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1746",returnDescription:` <p>The LM head layer if the model has one, None if not.</p> `,returnType:` <p><code>tf.keras.layers.Layer</code></p> `}}),Sr=new M({props:{name:"get_output_embeddings",anchor:"transformers.TFPreTrainedModel.get_output_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1653",returnDescription:` <p>The new weights mapping vocabulary to hidden states.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),Wr=new M({props:{name:"get_output_layer_with_bias",anchor:"transformers.TFPreTrainedModel.get_output_layer_with_bias",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1690",returnDescription:` <p>The layer that handles the bias, None if not an LM model.</p> `,returnType:` <p><code>tf.keras.layers.Layer</code></p> `}}),Xr=new M({props:{name:"get_prefix_bias_name",anchor:"transformers.TFPreTrainedModel.get_prefix_bias_name",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1703",returnDescription:` <p>The _prefix name of the bias.</p> `,returnType:` <p><code>str</code></p> `}}),Vr=new M({props:{name:"load_repo_checkpoint",anchor:"transformers.TFPreTrainedModel.load_repo_checkpoint",parameters:[{name:"repo_path_or_name",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder).`,name:"repo_path_or_name"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1129",returnDescription:` <p>A dictionary of extra metadata from the checkpoint, most commonly an \u201Cepoch\u201D count.</p> 
`,returnType:` <p><code>dict</code></p> `}}),Gr=new M({props:{name:"prepare_tf_dataset",anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset",parameters:[{name:"dataset",val:": datasets.Dataset"},{name:"batch_size",val:": int = 8"},{name:"shuffle",val:": bool = True"},{name:"tokenizer",val:": typing.Optional[ForwardRef('PreTrainedTokenizerBase')] = None"},{name:"collate_fn",val:": typing.Optional[typing.Callable] = None"},{name:"collate_fn_args",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"drop_remainder",val:": typing.Optional[bool] = None"},{name:"prefetch",val:": bool = True"}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.dataset",description:`<strong>dataset</strong> (<code>Any</code>) &#x2014; A [~<code>datasets.Dataset</code>] to be wrapped as a <code>tf.data.Dataset</code>.`,name:"dataset"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, defaults to 8) &#x2014; The size of batches to return.`,name:"batch_size"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.shuffle",description:`<strong>shuffle</strong> (<code>bool</code>, defaults to <code>True</code>) &#x2014; Whether to return samples from the dataset in random order. Usually <code>True</code> for training datasets and <code>False</code> for validation/test datasets.`,name:"shuffle"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>, <em>optional</em>) &#x2014; A <code>PreTrainedTokenizer</code> that will be used to pad samples to create batches. Has no effect if a specific <code>collate_fn</code> is passed instead.`,name:"tokenizer"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.collate_fn",description:`<strong>collate_fn</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A function that collates samples from the dataset into a single batch. Defaults to <code>DefaultDataCollator</code> if no <code>tokenizer</code> is supplied or <code>DataCollatorWithPadding</code> if a <code>tokenizer</code> is passed.`,name:"collate_fn"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.collate_fn_args",description:`<strong>collate_fn_args</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; A dict of arguments to pass to the <code>collate_fn</code> alongside the list of samples.`,name:"collate_fn_args"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.drop_remainder",description:`<strong>drop_remainder</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults to the same setting as <code>shuffle</code>.`,name:"drop_remainder"},{anchor:"transformers.TFPreTrainedModel.prepare_tf_dataset.prefetch",description:`<strong>prefetch</strong> (<code>bool</code>, defaults to <code>True</code>) &#x2014; Whether to add prefetching to the end of the <code>tf.data</code> pipeline. 
This is almost always beneficial for performance, but can be disabled in edge cases.`,name:"prefetch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1183",returnDescription:` <p>A <code>tf.data.Dataset</code> which is ready to pass to the Keras API.</p> `,returnType:` <p><code>Dataset</code></p> `}}),Hr=new M({props:{name:"prune_heads",anchor:"transformers.TFPreTrainedModel.prune_heads",parameters:[{name:"heads_to_prune",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.prune_heads.heads_to_prune",description:`<strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"heads_to_prune"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2082"}}),Yr=new M({props:{name:"register_for_auto_class",anchor:"transformers.TFPreTrainedModel.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'TFAutoModel'"}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;TFAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2732"}}),ro=new Hn({props:{warning:!0,$$slots:{default:[fx]},$$scope:{ctx:D}}}),Jr=new M({props:{name:"resize_token_embeddings",anchor:"transformers.TFPreTrainedModel.resize_token_embeddings",parameters:[{name:"new_num_tokens",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.resize_token_embeddings.new_num_tokens",description:`<strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or <code>None</code>, just returns a pointer to the input tokens without doing anything.`,name:"new_num_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1755",returnDescription:` <p>Pointer to the input tokens of the model.</p> `,returnType:` <p><code>tf.Variable</code> or <code>tf.keras.layers.Embedding</code></p> `}}),Qr=new M({props:{name:"save_pretrained",anchor:"transformers.TFPreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"saved_model",val:" = False"},{name:"version",val:" = 1"},{name:"push_to_hub",val:" = False"},{name:"max_shard_size",val:": typing.Union[int, str] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; Directory to which to save. 
Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.saved_model",description:`<strong>saved_model</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If the model has to be saved in saved model format as well or not.`,name:"saved_model"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.version",description:`<strong>version</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation <a href="https://www.tensorflow.org/tfx/serving/serving_basic" rel="nofollow">https://www.tensorflow.org/tfx/serving/serving_basic</a>`,name:"version"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace).`,name:"push_to_hub"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If a single weight of the model is bigger than <code>max_shard_size</code>, it will be in its own checkpoint shard which will be bigger than <code>max_shard_size</code>.</p> </div>`,name:"max_shard_size"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.</p> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2094"}}),ta=new M({props:{name:"serving",anchor:"transformers.TFPreTrainedModel.serving",parameters:[{name:"inputs",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.serving.inputs",description:`<strong>inputs</strong> (<code>Dict[str, tf.Tensor]</code>) &#x2014; The input of the saved model as a dictionary of tensors.`,name:"inputs"}]}}),oa=new M({props:{name:"serving_output",anchor:"transformers.TFPreTrainedModel.serving_output",parameters:[{name:"output",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.serving_output.output",description:`<strong>output</strong> (<code>TFBaseModelOutput</code>) &#x2014; The output returned by the 
model.`,name:"output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1092"}}),ra=new M({props:{name:"set_bias",anchor:"transformers.TFPreTrainedModel.set_bias",parameters:[{name:"value",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_bias.value",description:`<strong>value</strong> (<code>Dict[tf.Variable]</code>) &#x2014; All the new bias attached to an LM head.`,name:"value"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1730"}}),aa=new M({props:{name:"set_input_embeddings",anchor:"transformers.TFPreTrainedModel.set_input_embeddings",parameters:[{name:"value",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_input_embeddings.value",description:`<strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.`,name:"value"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1633"}}),na=new M({props:{name:"set_output_embeddings",anchor:"transformers.TFPreTrainedModel.set_output_embeddings",parameters:[{name:"value",val:""}],parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_output_embeddings.value",description:`<strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.`,name:"value"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1673"}}),sa=new M({props:{name:"test_step",anchor:"transformers.TFPreTrainedModel.test_step",parameters:[{name:"data",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1470"}}),da=new M({props:{name:"train_step",anchor:"transformers.TFPreTrainedModel.train_step",parameters:[{name:"data",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1362"}}),ca=new Ne({}),ma=new M({props:{name:"class transformers.modeling_tf_utils.TFModelUtilsMixin",anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L92"}}),ha=new M({props:{name:"num_parameters",anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters",parameters:[{name:"only_trainable",val:": bool = False"}],parametersDescription:[{anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable",description:`<strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters`,name:"only_trainable"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L97",returnDescription:` <p>The number of parameters.</p> `,returnType:` <p><code>int</code></p> `}}),fa=new Ne({}),ua=new M({props:{name:"class transformers.FlaxPreTrainedModel",anchor:"transformers.FlaxPreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"module",val:": Module"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"_do_init",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L159"}}),ga=new 
M({props:{name:"push_to_hub",anchor:"transformers.FlaxPreTrainedModel.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),_o=new Be({props:{anchor:"transformers.FlaxPreTrainedModel.push_to_hub.example",$$slots:{default:[ux]},$$scope:{ctx:D}}}),ba=new M({props:{name:"from_pretrained",anchor:"transformers.FlaxPreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pt index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a> and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <code>pretrained_model_name_or_path</code> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul>`,name:"config"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.from_pt",description:`<strong>from_pt</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <code>pretrained_model_name_or_path</code> argument).`,name:"from_pt"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.ignore_mismatched_sizes",description:`<strong>ignore_mismatched_sizes</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).`,name:"ignore_mismatched_sizes"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions 
if they exist.`,name:"force_download"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.local_files_only(bool,",description:`<strong>local_files_only(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (i.e., do not try to download the model).`,name:"local_files_only(bool,"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.`,name:"subfolder"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>). Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>). Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul>`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L472"}}),bo=new Be({props:{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.example",$$slots:{default:[gx]},$$scope:{ctx:D}}}),$a=new M({props:{name:"load_flax_sharded_weights",anchor:"transformers.FlaxPreTrainedModel.load_flax_sharded_weights",parameters:[{name:"shard_files",val:""}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.load_flax_sharded_weights.shard_files",description:`<strong>shard_files</strong> (<code>List[str]</code> &#x2014; The list of shard files to load.`,name:"shard_files"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L425",returnDescription:` <p>A nested dictionary of the model parameters, in the expected format for flax models : <code>&#123;'model': &#123;'params': &#123;'...'&#125;&#125;&#125;</code>.</p> `,returnType:` <p><code>Dict</code></p> `}}),Ta=new M({props:{name:"register_for_auto_class",anchor:"transformers.FlaxPreTrainedModel.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'FlaxAutoModel'"}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;FlaxAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L1034"}}),vo=new Hn({props:{warning:!0,$$slots:{default:[_x]},$$scope:{ctx:D}}}),xa=new M({props:{name:"save_pretrained",anchor:"transformers.FlaxPreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"params",val:" = None"},{name:"push_to_hub",val:" = False"},{name:"max_shard_size",val:" = '10GB'"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace).`,name:"push_to_hub"},{anchor:"transformers.FlaxPreTrainedModel.save_pretrained.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If a single weight of the model is bigger than <code>max_shard_size</code>, it will be in its own checkpoint shard which will be bigger than <code>max_shard_size</code>.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"max_shard_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L937"}}),Pa=new M({props:{name:"to_bf16",anchor:"transformers.FlaxPreTrainedModel.to_bf16",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_bf16.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_bf16.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip.`,name:"mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L320"}}),$o=new Be({props:{anchor:"transformers.FlaxPreTrainedModel.to_bf16.example",$$slots:{default:[bx]},$$scope:{ctx:D}}}),Ma=new M({props:{name:"to_fp16",anchor:"transformers.FlaxPreTrainedModel.to_fp16",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_fp16.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_fp16.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. 
The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip`,name:"mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L386"}}),wo=new Be({props:{anchor:"transformers.FlaxPreTrainedModel.to_fp16.example",$$slots:{default:[vx]},$$scope:{ctx:D}}}),Ea=new M({props:{name:"to_fp32",anchor:"transformers.FlaxPreTrainedModel.to_fp32",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_fp32.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_fp32.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip`,name:"mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L359"}}),To=new Be({props:{anchor:"transformers.FlaxPreTrainedModel.to_fp32.example",$$slots:{default:[yx]},$$scope:{ctx:D}}}),Fa=new Ne({}),ja=new M({props:{name:"class transformers.utils.PushToHubMixin",anchor:"transformers.utils.PushToHubMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L627"}}),Da=new M({props:{name:"push_to_hub",anchor:"transformers.utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.utils.PushToHubMixin.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your {object} to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload {object}&quot;</code>.`,name:"commit_message"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.utils.PushToHubMixin.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),ko=new Be({props:{anchor:"transformers.utils.PushToHubMixin.push_to_hub.example",$$slots:{default:[$x]},$$scope:{ctx:D}}}),za=new Ne({}),Aa=new M({props:{name:"transformers.modeling_utils.load_sharded_checkpoint",anchor:"transformers.modeling_utils.load_sharded_checkpoint",parameters:[{name:"model",val:""},{name:"folder",val:""},{name:"strict",val:" = True"}],parametersDescription:[{anchor:"transformers.modeling_utils.load_sharded_checkpoint.model",description:"<strong>model</strong> (<code>torch.nn.Module</code>) &#x2014; The model in which to load the checkpoint.",name:"model"},{anchor:"transformers.modeling_utils.load_sharded_checkpoint.folder",description:"<strong>folder</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; A path to a folder containing the sharded checkpoint.",name:"folder"},{anchor:"transformers.modeling_utils.load_sharded_checkpoint.strict",description:"<strong>strict</strong> (<code>bool</code>, *optional<code>, defaults to </code>True`) &#x2014;\nWhether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.",name:"strict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L323",returnDescription:` <p>A named tuple with <code>missing_keys</code> and <code>unexpected_keys</code> fields</p> <ul> <li><code>missing_keys</code> is a list of str containing the missing keys</li> <li><code>unexpected_keys</code> is a list of str containing the unexpected keys</li> </ul> `,returnType:` <p><code>NamedTuple</code></p> `}}),{c(){p=r("meta"),x=d(),w=r("h1"),f=r("a"),k=r("span"),u(c.$$.fragment),T=d(),ae=r("span"),Bm=s("Models"),jl=d(),Z=r("p"),Sm=s("The base classes "),Wa=r("a"),Wm=s("PreTrainedModel"),Xm=s(", "),Xa=r("a"),Vm=s("TFPreTrainedModel"),Gm=s(`, and 
`),Va=r("a"),Rm=s("FlaxPreTrainedModel"),Hm=s(` implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),Dl=d(),We=r("p"),Ga=r("a"),Ym=s("PreTrainedModel"),Jm=s(" and "),Ra=r("a"),Km=s("TFPreTrainedModel"),Zm=s(` also implement a few methods which are common among all the models to:`),ql=d(),st=r("ul"),Yn=r("li"),Qm=s("resize the input token embeddings when new tokens are added to the vocabulary"),ep=d(),Jn=r("li"),tp=s("prune the attention heads of the model."),zl=d(),O=r("p"),op=s("The other methods that are common to each model are defined in "),Ha=r("a"),rp=s("ModuleUtilsMixin"),ap=s(` (for the PyTorch models) and `),Kn=r("code"),np=s("~modeling_tf_utils.TFModuleUtilsMixin"),sp=s(` (for the TensorFlow models) or for text generation, `),Ya=r("a"),ip=s("GenerationMixin"),dp=s(` (for the PyTorch models), `),Ja=r("a"),lp=s("TFGenerationMixin"),cp=s(` (for the TensorFlow models) and `),Ka=r("a"),mp=s("FlaxGenerationMixin"),pp=s(" (for the Flax/JAX models)."),Al=d(),Xe=r("h2"),it=r("a"),Zn=r("span"),u(qo.$$.fragment),hp=d(),Qn=r("span"),fp=s("PreTrainedModel"),Cl=d(),F=r("div"),u(zo.$$.fragment),up=d(),es=r("p"),gp=s("Base class for all models."),_p=d(),Za=r("p"),Qa=r("a"),bp=s("PreTrainedModel"),vp=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),yp=d(),Ao=r("ul"),ts=r("li"),$p=s("resize the input embeddings,"),wp=d(),os=r("li"),Tp=s("prune heads in the self-attention heads."),xp=d(),rs=r("p"),kp=s("Class attributes (overridden by derived classes):"),Pp=d(),G=r("ul"),as=r("li"),pe=r("p"),ns=r("strong"),Mp=s("config_class"),Ep=s(" ("),en=r("a"),Fp=s("PretrainedConfig"),jp=s(") \u2014 A subclass of "),tn=r("a"),Dp=s("PretrainedConfig"),qp=s(` to use as configuration class for this model architecture.`),zp=d(),Co=r("li"),he=r("p"),ss=r("strong"),Ap=s("load_tf_weights"),Cp=s(" ("),is=r("code"),Ip=s("Callable"),Lp=s(") \u2014 A python "),ds=r("em"),Up=s("method"),Op=s(` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:`),Np=d(),Ve=r("ul"),dt=r("li"),ls=r("strong"),Bp=s("model"),Sp=s(" ("),on=r("a"),Wp=s("PreTrainedModel"),Xp=s(") \u2014 An instance of the model on which to load the TensorFlow checkpoint."),Vp=d(),lt=r("li"),cs=r("strong"),Gp=s("config"),Rp=s(" ("),ms=r("code"),Hp=s("PreTrainedConfig"),Yp=s(") \u2014 An instance of the configuration associated to the model."),Jp=d(),ct=r("li"),ps=r("strong"),Kp=s("path"),Zp=s(" ("),hs=r("code"),Qp=s("str"),eh=s(") \u2014 A path to the TensorFlow checkpoint."),th=d(),fs=r("li"),mt=r("p"),us=r("strong"),oh=s("base_model_prefix"),rh=s(" ("),gs=r("code"),ah=s("str"),nh=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),sh=d(),_s=r("li"),pt=r("p"),bs=r("strong"),ih=s("is_parallelizable"),dh=s(" ("),vs=r("code"),lh=s("bool"),ch=s(") \u2014 A flag indicating whether this model supports model parallelization."),mh=d(),ys=r("li"),S=r("p"),$s=r("strong"),ph=s("main_input_name"),hh=s(" ("),ws=r("code"),fh=s("str"),uh=s(") \u2014 The name of the principal input to the model (often "),Ts=r("code"),gh=s("input_ids"),_h=s(` for NLP models, `),xs=r("code"),bh=s("pixel_values"),vh=s(" for vision models and 
"),ks=r("code"),yh=s("input_values"),$h=s(" for speech models)."),wh=d(),fe=r("div"),u(Io.$$.fragment),Th=d(),Lo=r("p"),xh=s(`Upload the model file to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Ps=r("code"),kh=s("repo_path_or_name"),Ph=s("."),Mh=d(),u(ht.$$.fragment),Eh=d(),z=r("div"),u(Uo.$$.fragment),Fh=d(),Ms=r("p"),jh=s("Instantiate a pretrained pytorch model from a pre-trained model configuration."),Dh=d(),Ge=r("p"),qh=s("The model is set in evaluation mode by default using "),Es=r("code"),zh=s("model.eval()"),Ah=s(` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Fs=r("code"),Ch=s("model.train()"),Ih=s("."),Lh=d(),Oo=r("p"),Uh=s("The warning "),js=r("em"),Oh=s("Weights from XXX not initialized from pretrained model"),Nh=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),Bh=d(),No=r("p"),Sh=s("The warning "),Ds=r("em"),Wh=s("Weights from XXX not used in YYY"),Xh=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),Vh=d(),u(ft.$$.fragment),Gh=d(),u(ut.$$.fragment),Rh=d(),u(gt.$$.fragment),Hh=d(),qs=r("ul"),rn=r("li"),zs=r("code"),Yh=s("low_cpu_mem_usage"),Jh=s(" algorithm:"),Kh=d(),As=r("p"),Zh=s("This is an experimental function that loads the model using ~1x model size CPU memory"),Qh=d(),Cs=r("p"),ef=s("Here is how it works:"),tf=d(),R=r("ol"),Is=r("li"),of=s("save which state_dict keys we have"),rf=d(),Ls=r("li"),af=s("drop state_dict before the model is created, since the latter takes 1x model size CPU memory"),nf=d(),Us=r("li"),sf=s(`after the model has been instantiated switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict`),df=d(),Os=r("li"),lf=s("load state_dict 2nd time"),cf=d(),Ns=r("li"),mf=s("replace the params/buffers from the state_dict"),pf=d(),Bs=r("p"),hf=s("Currently, it can\u2019t handle deepspeed ZeRO stage 3 and ignores loading errors"),ff=d(),_t=r("div"),u(Bo.$$.fragment),uf=d(),Ss=r("p"),gf=s("Returns the model\u2019s input embeddings."),_f=d(),bt=r("div"),u(So.$$.fragment),bf=d(),an=r("p"),vf=s(`Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. 
Solution inspired from the PyTorch discussions: `),Wo=r("a"),yf=s("https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2"),$f=d(),vt=r("div"),u(Xo.$$.fragment),wf=d(),Ws=r("p"),Tf=s("Returns the model\u2019s output embeddings."),xf=d(),ue=r("div"),u(Vo.$$.fragment),kf=d(),Xs=r("p"),Pf=s("Deactivates gradient checkpointing for the current model."),Mf=d(),Vs=r("p"),Ef=s(`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),Ff=d(),ge=r("div"),u(Go.$$.fragment),jf=d(),Gs=r("p"),Df=s("Activates gradient checkpointing for the current model."),qf=d(),Rs=r("p"),zf=s(`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),Af=d(),yt=r("div"),u(Ro.$$.fragment),Cf=d(),Hs=r("p"),If=s("If needed prunes and maybe initializes weights."),Lf=d(),$t=r("div"),u(Ho.$$.fragment),Uf=d(),Ys=r("p"),Of=s(`A method executed at the end of each Transformer model initialization, to execute code that needs the model\u2019s modules properly initialized (such as weight initialization).`),Nf=d(),wt=r("div"),u(Yo.$$.fragment),Bf=d(),Js=r("p"),Sf=s("Prunes heads of the base model."),Wf=d(),_e=r("div"),u(Jo.$$.fragment),Xf=d(),Ks=r("p"),Vf=s(`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Gf=d(),u(Tt.$$.fragment),Rf=d(),be=r("div"),u(Ko.$$.fragment),Hf=d(),Zo=r("p"),Yf=s("Resizes input token embeddings matrix of the model if "),Zs=r("code"),Jf=s("new_num_tokens != config.vocab_size"),Kf=s("."),Zf=d(),Qo=r("p"),Qf=s("Takes care of tying weights embeddings afterwards if the model class has a "),Qs=r("code"),eu=s("tie_weights()"),tu=s(" method."),ou=d(),xt=r("div"),u(er.$$.fragment),ru=d(),tr=r("p"),au=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),nn=r("a"),nu=s("from_pretrained()"),su=s(" class method."),iu=d(),kt=r("div"),u(or.$$.fragment),du=d(),ei=r("p"),lu=s("Set model\u2019s input embeddings."),cu=d(),ve=r("div"),u(rr.$$.fragment),mu=d(),ti=r("p"),pu=s("Tie the weights between the input embeddings and the output embeddings."),hu=d(),ar=r("p"),fu=s("If the "),oi=r("code"),uu=s("torchscript"),gu=s(` flag is set in the configuration, can\u2019t handle parameter sharing so we are cloning the weights instead.`),Il=d(),sn=r("a"),Ll=d(),Re=r("h3"),Pt=r("a"),ri=r("span"),u(nr.$$.fragment),_u=d(),ai=r("span"),bu=s("Large model loading"),Ul=d(),ye=r("p"),vu=s("In Transformers 4.20.0, the "),dn=r("a"),yu=s("from_pretrained()"),$u=s(" method has been reworked to accommodate large models using "),sr=r("a"),wu=s("Accelerate"),Tu=s(". This requires Accelerate >= 0.9.0 and PyTorch >= 1.9.0. Instead of creating the full model, then loading the pretrained weights inside it (which takes twice the size of the model in RAM, one for the randomly initialized model, one for the weights), there is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."),Ol=d(),Mt=r("p"),xu=s("This option can be activated with "),ni=r("code"),ku=s("low_cpu_mem_usage=True"),Pu=s(". The model is first created on the Meta device (with empty weights) and the state dict is then loaded inside it (shard by shard in the case of a sharded checkpoint). 
This way the maximum RAM used is the full size of the model only."),Nl=d(),u(ir.$$.fragment),Bl=d(),Et=r("p"),Mu=s("Moreover, you can directly place the model on different devices if it doesn\u2019t fully fit in RAM (only works for inference for now). With "),si=r("code"),Eu=s('device_map="auto"'),Fu=s(", Accelerate will determine where to put each layer to maximize the use of your fastest devices (GPUs) and offload the rest on the CPU, or even the hard drive if you don\u2019t have enough GPU RAM (or CPU RAM). Even if the model is split across several devices, it will run as you would normally expect."),Sl=d(),Q=r("p"),ju=s("When passing a "),ii=r("code"),Du=s("device_map"),qu=s(", "),di=r("code"),zu=s("low_cpu_mem_usage"),Au=s(" is automatically set to "),li=r("code"),Cu=s("True"),Iu=s(", so you don\u2019t need to specify it:"),Wl=d(),u(dr.$$.fragment),Xl=d(),Ft=r("p"),Lu=s("You can inspect how the model was split across devices by looking at its "),ci=r("code"),Uu=s("hf_device_map"),Ou=s(" attribute:"),Vl=d(),u(lr.$$.fragment),Gl=d(),u(cr.$$.fragment),Rl=d(),ln=r("p"),Nu=s("You can also write your own device map following the same format (a dictionary layer name to device). It should map all parameters of the model to a given device, but you don\u2019t have to detail where all the submosules of one layer go if that layer is entirely on the same device. For instance, the following device map would work properly for T0pp (as long as you have the GPU memory):"),Hl=d(),u(mr.$$.fragment),Yl=d(),jt=r("p"),Bu=s("Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like "),mi=r("code"),Su=s("torch.float16"),Wu=s(") or use direct quantization techniques as described below."),Jl=d(),He=r("h3"),Dt=r("a"),pi=r("span"),u(pr.$$.fragment),Xu=d(),hi=r("span"),Vu=s("Model Instantiation dtype"),Kl=d(),ee=r("p"),Gu=s("Under Pytorch a model normally gets instantiated with "),fi=r("code"),Ru=s("torch.float32"),Hu=s(` format. This can be an issue if one tries to load a model whose weights are in fp16, since it\u2019d require twice as much memory. 
To overcome this limitation, you can either explicitly pass the desired `),ui=r("code"),Yu=s("dtype"),Ju=s(" using "),gi=r("code"),Ku=s("torch_dtype"),Zu=s(" argument:"),Zl=d(),u(hr.$$.fragment),Ql=d(),$e=r("p"),Qu=s("or, if you want the model to always load in the most optimal memory pattern, you can use the special value "),_i=r("code"),eg=s('"auto"'),tg=s(`, and then `),bi=r("code"),og=s("dtype"),rg=s(" will be automatically derived from the model\u2019s weights:"),ec=d(),u(fr.$$.fragment),tc=d(),qt=r("p"),ag=s("Models instantiated from scratch can also be told which "),vi=r("code"),ng=s("dtype"),sg=s(" to use with:"),oc=d(),u(ur.$$.fragment),rc=d(),cn=r("p"),ig=s("Due to Pytorch design, this functionality is only available for floating dtypes."),ac=d(),Ye=r("h2"),zt=r("a"),yi=r("span"),u(gr.$$.fragment),dg=d(),$i=r("span"),lg=s("ModuleUtilsMixin"),nc=d(),I=r("div"),u(_r.$$.fragment),cg=d(),br=r("p"),mg=s("A few utilities for "),wi=r("code"),pg=s("torch.nn.Modules"),hg=s(", to be used as a mixin."),fg=d(),we=r("div"),u(vr.$$.fragment),ug=d(),Ti=r("p"),gg=s("Add a memory hook before and after each sub-module forward pass to record increase in memory consumption."),_g=d(),Je=r("p"),bg=s("Increase in memory consumption is stored in a "),xi=r("code"),vg=s("mem_rss_diff"),yg=s(` attribute for each module and can be reset to zero with `),ki=r("code"),$g=s("model.reset_memory_hooks_state()"),wg=s("."),Tg=d(),At=r("div"),u(yr.$$.fragment),xg=d(),Pi=r("p"),kg=s("Helper function to estimate the total number of tokens from the model inputs."),Pg=d(),Ct=r("div"),u($r.$$.fragment),Mg=d(),Ke=r("p"),Eg=s(`Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `),Mi=r("code"),Fg=s("12 * d_model << sequence_length"),jg=s(") as laid out in "),wr=r("a"),Dg=s(`this paper`),qg=s(` section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.`),zg=d(),It=r("div"),u(Tr.$$.fragment),Ag=d(),Ei=r("p"),Cg=s("Makes broadcastable attention and causal masks so that future and masked tokens are ignored."),Ig=d(),Lt=r("div"),u(xr.$$.fragment),Lg=d(),Fi=r("p"),Ug=s("Prepare the head mask if needed."),Og=d(),Ut=r("div"),u(kr.$$.fragment),Ng=d(),ji=r("p"),Bg=s("Invert an attention mask (e.g., switches 0. 
and 1.)."),Sg=d(),Ot=r("div"),u(Pr.$$.fragment),Wg=d(),Di=r("p"),Xg=s("Get number of (optionally, trainable or non-embeddings) parameters in the module."),Vg=d(),Nt=r("div"),u(Mr.$$.fragment),Gg=d(),Ze=r("p"),Rg=s("Reset the "),qi=r("code"),Hg=s("mem_rss_diff"),Yg=s(" attribute of each module (see "),mn=r("a"),Jg=s("add_memory_hooks()"),Kg=s(")."),sc=d(),Qe=r("h2"),Bt=r("a"),zi=r("span"),u(Er.$$.fragment),Zg=d(),Ai=r("span"),Qg=s("TFPreTrainedModel"),ic=d(),P=r("div"),u(Fr.$$.fragment),e_=d(),Ci=r("p"),t_=s("Base class for all TF models."),o_=d(),pn=r("p"),hn=r("a"),r_=s("TFPreTrainedModel"),a_=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),n_=d(),jr=r("ul"),Ii=r("li"),s_=s("resize the input embeddings,"),i_=d(),Li=r("li"),d_=s("prune heads in the self-attention heads."),l_=d(),Ui=r("p"),c_=s("Class attributes (overridden by derived classes):"),m_=d(),et=r("ul"),Te=r("li"),Oi=r("strong"),p_=s("config_class"),h_=s(" ("),fn=r("a"),f_=s("PretrainedConfig"),u_=s(") \u2014 A subclass of "),un=r("a"),g_=s("PretrainedConfig"),__=s(` to use as configuration class for this model architecture.`),b_=d(),St=r("li"),Ni=r("strong"),v_=s("base_model_prefix"),y_=s(" ("),Bi=r("code"),$_=s("str"),w_=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),T_=d(),W=r("li"),Si=r("strong"),x_=s("main_input_name"),k_=s(" ("),Wi=r("code"),P_=s("str"),M_=s(") \u2014 The name of the principal input to the model (often "),Xi=r("code"),E_=s("input_ids"),F_=s(` for NLP models, `),Vi=r("code"),j_=s("pixel_values"),D_=s(" for vision models and "),Gi=r("code"),q_=s("input_values"),z_=s(" for speech models)."),A_=d(),xe=r("div"),u(Dr.$$.fragment),C_=d(),qr=r("p"),I_=s("Upload the model files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in "),Ri=r("code"),L_=s("repo_path_or_name"),U_=s("."),O_=d(),u(Wt.$$.fragment),N_=d(),Xt=r("div"),u(zr.$$.fragment),B_=d(),Hi=r("p"),S_=s(`This is a thin wrapper that sets the model\u2019s loss output head as the loss if the user does not specify a loss function themselves.`),W_=d(),Vt=r("div"),u(Ar.$$.fragment),X_=d(),Cr=r("p"),V_=s("Creates a draft of a model card using the information available to the "),Yi=r("code"),G_=s("Trainer"),R_=s("."),H_=d(),N=r("div"),u(Ir.$$.fragment),Y_=d(),Ji=r("p"),J_=s("Instantiate a pretrained TF 2.0 model from a pre-trained model configuration."),K_=d(),Lr=r("p"),Z_=s("The warning "),Ki=r("em"),Q_=s("Weights from XXX not initialized from pretrained model"),eb=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),tb=d(),Ur=r("p"),ob=s("The warning "),Zi=r("em"),rb=s("Weights from XXX not used in YYY"),ab=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),nb=d(),u(Gt.$$.fragment),sb=d(),u(Rt.$$.fragment),ib=d(),Ht=r("div"),u(Or.$$.fragment),db=d(),Qi=r("p"),lb=s("Dict of bias attached to an LM head. The key represents the name of the bias attribute."),cb=d(),Yt=r("div"),u(Nr.$$.fragment),mb=d(),ed=r("p"),pb=s("Returns the model\u2019s input embeddings layer."),hb=d(),Jt=r("div"),u(Br.$$.fragment),fb=d(),td=r("p"),ub=s("The LM Head layer. 
This method must be overwritten by all the models that have a lm head."),gb=d(),Kt=r("div"),u(Sr.$$.fragment),_b=d(),od=r("p"),bb=s("Returns the model\u2019s output embeddings"),vb=d(),Zt=r("div"),u(Wr.$$.fragment),yb=d(),rd=r("p"),$b=s(`Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings`),wb=d(),Qt=r("div"),u(Xr.$$.fragment),Tb=d(),ad=r("p"),xb=s("Get the concatenated _prefix name of the bias from the model name to the parent layer"),kb=d(),eo=r("div"),u(Vr.$$.fragment),Pb=d(),nd=r("p"),Mb=s(`Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when the checkpoint was made.`),Eb=d(),to=r("div"),u(Gr.$$.fragment),Fb=d(),H=r("p"),jb=s("Wraps a HuggingFace "),Rr=r("a"),Db=s("Dataset"),qb=s(" as a "),sd=r("code"),zb=s("tf.data.Dataset"),Ab=s(` with collation and batching. This method is designed to create a \u201Cready-to-use\u201D dataset that can be passed directly to Keras methods like `),id=r("code"),Cb=s("fit()"),Ib=s(` without further modification. The method will drop columns from the dataset if they don\u2019t match input names for the model. If you want to specify the column names to return rather than using the names that match this model, we recommend using `),dd=r("code"),Lb=s("Dataset.to_tf_dataset()"),Ub=s(" instead."),Ob=d(),oo=r("div"),u(Hr.$$.fragment),Nb=d(),ld=r("p"),Bb=s("Prunes heads of the base model."),Sb=d(),ke=r("div"),u(Yr.$$.fragment),Wb=d(),cd=r("p"),Xb=s(`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Vb=d(),u(ro.$$.fragment),Gb=d(),Pe=r("div"),u(Jr.$$.fragment),Rb=d(),Kr=r("p"),Hb=s("Resizes input token embeddings matrix of the model if "),md=r("code"),Yb=s("new_num_tokens != config.vocab_size"),Jb=s("."),Kb=d(),Zr=r("p"),Zb=s("Takes care of tying weights embeddings afterwards if the model class has a "),pd=r("code"),Qb=s("tie_weights()"),ev=s(" method."),tv=d(),ao=r("div"),u(Qr.$$.fragment),ov=d(),ea=r("p"),rv=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),gn=r("a"),av=s("from_pretrained()"),nv=s(" class method."),sv=d(),no=r("div"),u(ta.$$.fragment),iv=d(),hd=r("p"),dv=s("Method used for serving the model."),lv=d(),so=r("div"),u(oa.$$.fragment),cv=d(),fd=r("p"),mv=s("Prepare the output of the saved model. Each model must implement this function."),pv=d(),io=r("div"),u(ra.$$.fragment),hv=d(),ud=r("p"),fv=s("Set all the bias in the LM head."),uv=d(),lo=r("div"),u(aa.$$.fragment),gv=d(),gd=r("p"),_v=s("Set model\u2019s input embeddings"),bv=d(),co=r("div"),u(na.$$.fragment),vv=d(),_d=r("p"),yv=s("Set model\u2019s output embeddings"),$v=d(),mo=r("div"),u(sa.$$.fragment),wv=d(),ia=r("p"),Tv=s("A modification of Keras\u2019s default "),bd=r("code"),xv=s("train_step"),kv=s(` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.`),Pv=d(),po=r("div"),u(da.$$.fragment),Mv=d(),la=r("p"),Ev=s("A modification of Keras\u2019s default "),vd=r("code"),Fv=s("train_step"),jv=s(` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. 
In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.`),dc=d(),tt=r("h2"),ho=r("a"),yd=r("span"),u(ca.$$.fragment),Dv=d(),$d=r("span"),qv=s("TFModelUtilsMixin"),lc=d(),ne=r("div"),u(ma.$$.fragment),zv=d(),pa=r("p"),Av=s("A few utilities for "),wd=r("code"),Cv=s("tf.keras.Model"),Iv=s(", to be used as a mixin."),Lv=d(),fo=r("div"),u(ha.$$.fragment),Uv=d(),Td=r("p"),Ov=s("Get the number of (optionally, trainable) parameters in the model."),cc=d(),ot=r("h2"),uo=r("a"),xd=r("span"),u(fa.$$.fragment),Nv=d(),kd=r("span"),Bv=s("FlaxPreTrainedModel"),mc=d(),q=r("div"),u(ua.$$.fragment),Sv=d(),Pd=r("p"),Wv=s("Base class for all models."),Xv=d(),_n=r("p"),bn=r("a"),Vv=s("FlaxPreTrainedModel"),Gv=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.`),Rv=d(),Md=r("p"),Hv=s("Class attributes (overridden by derived classes):"),Yv=d(),rt=r("ul"),Me=r("li"),Ed=r("strong"),Jv=s("config_class"),Kv=s(" ("),vn=r("a"),Zv=s("PretrainedConfig"),Qv=s(") \u2014 A subclass of "),yn=r("a"),ey=s("PretrainedConfig"),ty=s(` to use as configuration class for this model architecture.`),oy=d(),go=r("li"),Fd=r("strong"),ry=s("base_model_prefix"),ay=s(" ("),jd=r("code"),ny=s("str"),sy=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),iy=d(),X=r("li"),Dd=r("strong"),dy=s("main_input_name"),ly=s(" ("),qd=r("code"),cy=s("str"),my=s(") \u2014 The name of the principal input to the model (often "),zd=r("code"),py=s("input_ids"),hy=s(` for NLP models, `),Ad=r("code"),fy=s("pixel_values"),uy=s(" for vision models and "),Cd=r("code"),gy=s("input_values"),_y=s(" for speech models)."),by=d(),Ee=r("div"),u(ga.$$.fragment),vy=d(),_a=r("p"),yy=s(`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Id=r("code"),$y=s("repo_path_or_name"),wy=s("."),Ty=d(),u(_o.$$.fragment),xy=d(),V=r("div"),u(ba.$$.fragment),ky=d(),Ld=r("p"),Py=s("Instantiate a pretrained flax model from a pre-trained model configuration."),My=d(),va=r("p"),Ey=s("The warning "),Ud=r("em"),Fy=s("Weights from XXX not initialized from pretrained model"),jy=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),Dy=d(),ya=r("p"),qy=s("The warning "),Od=r("em"),zy=s("Weights from XXX not used in YYY"),Ay=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),Cy=d(),u(bo.$$.fragment),Iy=d(),Fe=r("div"),u($a.$$.fragment),Ly=d(),wa=r("p"),Uy=s("This is the same as "),Nd=r("code"),Oy=s("flax.serialization.from_bytes"),Ny=s(` (https:lax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.`),By=d(),Bd=r("p"),Sy=s(`This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.`),Wy=d(),je=r("div"),u(Ta.$$.fragment),Xy=d(),Sd=r("p"),Vy=s(`Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Gy=d(),u(vo.$$.fragment),Ry=d(),yo=r("div"),u(xa.$$.fragment),Hy=d(),ka=r("p"),Yy=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Wd=r("code"),Jy=s("[from_pretrained()](/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)"),Ky=s(" class method"),Zy=d(),te=r("div"),u(Pa.$$.fragment),Qy=d(),Y=r("p"),e1=s("Cast the floating-point "),Xd=r("code"),t1=s("params"),o1=s(" to "),Vd=r("code"),r1=s("jax.numpy.bfloat16"),a1=s(". This returns a new "),Gd=r("code"),n1=s("params"),s1=s(` tree and does not cast the `),Rd=r("code"),i1=s("params"),d1=s(" in place."),l1=d(),Hd=r("p"),c1=s(`This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.`),m1=d(),u($o.$$.fragment),p1=d(),oe=r("div"),u(Ma.$$.fragment),h1=d(),J=r("p"),f1=s("Cast the floating-point "),Yd=r("code"),u1=s("parmas"),g1=s(" to "),Jd=r("code"),_1=s("jax.numpy.float16"),b1=s(". This returns a new "),Kd=r("code"),v1=s("params"),y1=s(` tree and does not cast the `),Zd=r("code"),$1=s("params"),w1=s(" in place."),T1=d(),Qd=r("p"),x1=s(`This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.`),k1=d(),u(wo.$$.fragment),P1=d(),De=r("div"),u(Ea.$$.fragment),M1=d(),K=r("p"),E1=s("Cast the floating-point "),el=r("code"),F1=s("parmas"),j1=s(" to "),tl=r("code"),D1=s("jax.numpy.float32"),q1=s(`. This method can be used to explicitly convert the model parameters to fp32 precision. 
This returns a new `),ol=r("code"),z1=s("params"),A1=s(" tree and does not cast the "),rl=r("code"),C1=s("params"),I1=s(" in place."),L1=d(),u(To.$$.fragment),pc=d(),at=r("h2"),xo=r("a"),al=r("span"),u(Fa.$$.fragment),U1=d(),nl=r("span"),O1=s("Pushing to the Hub"),hc=d(),se=r("div"),u(ja.$$.fragment),N1=d(),sl=r("p"),B1=s("A Mixin containing the functionality to push a model or tokenizer to the hub."),S1=d(),qe=r("div"),u(Da.$$.fragment),W1=d(),qa=r("p"),X1=s(`Upload the {object_files} to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),il=r("code"),V1=s("repo_path_or_name"),G1=s("."),R1=d(),u(ko.$$.fragment),fc=d(),nt=r("h2"),Po=r("a"),dl=r("span"),u(za.$$.fragment),H1=d(),ll=r("span"),Y1=s("Sharded checkpoints"),uc=d(),ie=r("div"),u(Aa.$$.fragment),J1=d(),Ca=r("p"),K1=s(`This is the same as `),Ia=r("a"),cl=r("code"),Z1=s("torch.nn.Module.load_state_dict"),Q1=s(` but for a sharded checkpoint.`),e2=d(),ml=r("p"),t2=s(`This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.`),this.h()},l(o){const h=ax('[data-svelte="svelte-1phssyn"]',document.head);p=a(h,"META",{name:!0,content:!0}),h.forEach(t),x=l(o),w=a(o,"H1",{class:!0});var La=n(w);f=a(La,"A",{id:!0,class:!0,href:!0});var pl=n(f);k=a(pl,"SPAN",{});var hl=n(k);g(c.$$.fragment,hl),hl.forEach(t),pl.forEach(t),T=l(La),ae=a(La,"SPAN",{});var fl=n(ae);Bm=i(fl,"Models"),fl.forEach(t),La.forEach(t),jl=l(o),Z=a(o,"P",{});var de=n(Z);Sm=i(de,"The base classes "),Wa=a(de,"A",{href:!0});var ul=n(Wa);Wm=i(ul,"PreTrainedModel"),ul.forEach(t),Xm=i(de,", "),Xa=a(de,"A",{href:!0});var gl=n(Xa);Vm=i(gl,"TFPreTrainedModel"),gl.forEach(t),Gm=i(de,`, and `),Va=a(de,"A",{href:!0});var _l=n(Va);Rm=i(_l,"FlaxPreTrainedModel"),_l.forEach(t),Hm=i(de,` implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),de.forEach(t),Dl=l(o),We=a(o,"P",{});var Mo=n(We);Ga=a(Mo,"A",{href:!0});var bl=n(Ga);Ym=i(bl,"PreTrainedModel"),bl.forEach(t),Jm=i(Mo," and "),Ra=a(Mo,"A",{href:!0});var vl=n(Ra);Km=i(vl,"TFPreTrainedModel"),vl.forEach(t),Zm=i(Mo,` also implement a few methods which are common among all the models to:`),Mo.forEach(t),ql=l(o),st=a(o,"UL",{});var Ua=n(st);Yn=a(Ua,"LI",{});var yl=n(Yn);Qm=i(yl,"resize the input token embeddings when new tokens are added to the vocabulary"),yl.forEach(t),ep=l(Ua),Jn=a(Ua,"LI",{});var $l=n(Jn);tp=i($l,"prune the attention heads of the model."),$l.forEach(t),Ua.forEach(t),zl=l(o),O=a(o,"P",{});var B=n(O);op=i(B,"The other methods that are common to each model are defined in "),Ha=a(B,"A",{href:!0});var wl=n(Ha);rp=i(wl,"ModuleUtilsMixin"),wl.forEach(t),ap=i(B,` (for the PyTorch models) and `),Kn=a(B,"CODE",{});var i2=n(Kn);np=i(i2,"~modeling_tf_utils.TFModuleUtilsMixin"),i2.forEach(t),sp=i(B,` (for the TensorFlow models) or for text generation, `),Ya=a(B,"A",{href:!0});var d2=n(Ya);ip=i(d2,"GenerationMixin"),d2.forEach(t),dp=i(B,` (for the PyTorch models), `),Ja=a(B,"A",{href:!0});var l2=n(Ja);lp=i(l2,"TFGenerationMixin"),l2.forEach(t),cp=i(B,` (for the TensorFlow models) and `),Ka=a(B,"A",{href:!0});var c2=n(Ka);mp=i(c2,"FlaxGenerationMixin"),c2.forEach(t),pp=i(B," (for the Flax/JAX models)."),B.forEach(t),Al=l(o),Xe=a(o,"H2",{class:!0});var _c=n(Xe);it=a(_c,"A",{id:!0,class:!0,href:!0});var m2=n(it);Zn=a(m2,"SPAN",{});var 
p2=n(Zn);g(qo.$$.fragment,p2),p2.forEach(t),m2.forEach(t),hp=l(_c),Qn=a(_c,"SPAN",{});var h2=n(Qn);fp=i(h2,"PreTrainedModel"),h2.forEach(t),_c.forEach(t),Cl=l(o),F=a(o,"DIV",{class:!0});var j=n(F);g(zo.$$.fragment,j),up=l(j),es=a(j,"P",{});var f2=n(es);gp=i(f2,"Base class for all models."),f2.forEach(t),_p=l(j),Za=a(j,"P",{});var o2=n(Za);Qa=a(o2,"A",{href:!0});var u2=n(Qa);bp=i(u2,"PreTrainedModel"),u2.forEach(t),vp=i(o2,` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),o2.forEach(t),yp=l(j),Ao=a(j,"UL",{});var bc=n(Ao);ts=a(bc,"LI",{});var g2=n(ts);$p=i(g2,"resize the input embeddings,"),g2.forEach(t),wp=l(bc),os=a(bc,"LI",{});var _2=n(os);Tp=i(_2,"prune heads in the self-attention heads."),_2.forEach(t),bc.forEach(t),xp=l(j),rs=a(j,"P",{});var b2=n(rs);kp=i(b2,"Class attributes (overridden by derived classes):"),b2.forEach(t),Pp=l(j),G=a(j,"UL",{});var ze=n(G);as=a(ze,"LI",{});var v2=n(as);pe=a(v2,"P",{});var Oa=n(pe);ns=a(Oa,"STRONG",{});var y2=n(ns);Mp=i(y2,"config_class"),y2.forEach(t),Ep=i(Oa," ("),en=a(Oa,"A",{href:!0});var $2=n(en);Fp=i($2,"PretrainedConfig"),$2.forEach(t),jp=i(Oa,") \u2014 A subclass of "),tn=a(Oa,"A",{href:!0});var w2=n(tn);Dp=i(w2,"PretrainedConfig"),w2.forEach(t),qp=i(Oa,` to use as configuration class for this model architecture.`),Oa.forEach(t),v2.forEach(t),zp=l(ze),Co=a(ze,"LI",{});var vc=n(Co);he=a(vc,"P",{});var Na=n(he);ss=a(Na,"STRONG",{});var T2=n(ss);Ap=i(T2,"load_tf_weights"),T2.forEach(t),Cp=i(Na," ("),is=a(Na,"CODE",{});var x2=n(is);Ip=i(x2,"Callable"),x2.forEach(t),Lp=i(Na,") \u2014 A python "),ds=a(Na,"EM",{});var k2=n(ds);Up=i(k2,"method"),k2.forEach(t),Op=i(Na,` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:`),Na.forEach(t),Np=l(vc),Ve=a(vc,"UL",{});var $n=n(Ve);dt=a($n,"LI",{});var Tl=n(dt);ls=a(Tl,"STRONG",{});var P2=n(ls);Bp=i(P2,"model"),P2.forEach(t),Sp=i(Tl," ("),on=a(Tl,"A",{href:!0});var M2=n(on);Wp=i(M2,"PreTrainedModel"),M2.forEach(t),Xp=i(Tl,") \u2014 An instance of the model on which to load the TensorFlow checkpoint."),Tl.forEach(t),Vp=l($n),lt=a($n,"LI",{});var xl=n(lt);cs=a(xl,"STRONG",{});var E2=n(cs);Gp=i(E2,"config"),E2.forEach(t),Rp=i(xl," ("),ms=a(xl,"CODE",{});var F2=n(ms);Hp=i(F2,"PreTrainedConfig"),F2.forEach(t),Yp=i(xl,") \u2014 An instance of the configuration associated to the model."),xl.forEach(t),Jp=l($n),ct=a($n,"LI",{});var kl=n(ct);ps=a(kl,"STRONG",{});var j2=n(ps);Kp=i(j2,"path"),j2.forEach(t),Zp=i(kl," ("),hs=a(kl,"CODE",{});var D2=n(hs);Qp=i(D2,"str"),D2.forEach(t),eh=i(kl,") \u2014 A path to the TensorFlow checkpoint."),kl.forEach(t),$n.forEach(t),vc.forEach(t),th=l(ze),fs=a(ze,"LI",{});var q2=n(fs);mt=a(q2,"P",{});var Pl=n(mt);us=a(Pl,"STRONG",{});var z2=n(us);oh=i(z2,"base_model_prefix"),z2.forEach(t),rh=i(Pl," ("),gs=a(Pl,"CODE",{});var A2=n(gs);ah=i(A2,"str"),A2.forEach(t),nh=i(Pl,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),Pl.forEach(t),q2.forEach(t),sh=l(ze),_s=a(ze,"LI",{});var C2=n(_s);pt=a(C2,"P",{});var Ml=n(pt);bs=a(Ml,"STRONG",{});var I2=n(bs);ih=i(I2,"is_parallelizable"),I2.forEach(t),dh=i(Ml," ("),vs=a(Ml,"CODE",{});var L2=n(vs);lh=i(L2,"bool"),L2.forEach(t),ch=i(Ml,") \u2014 A flag indicating whether this model supports model parallelization."),Ml.forEach(t),C2.forEach(t),mh=l(ze),ys=a(ze,"LI",{});var U2=n(ys);S=a(U2,"P",{});var 
le=n(S);$s=a(le,"STRONG",{});var O2=n($s);ph=i(O2,"main_input_name"),O2.forEach(t),hh=i(le," ("),ws=a(le,"CODE",{});var N2=n(ws);fh=i(N2,"str"),N2.forEach(t),uh=i(le,") \u2014 The name of the principal input to the model (often "),Ts=a(le,"CODE",{});var B2=n(Ts);gh=i(B2,"input_ids"),B2.forEach(t),_h=i(le,` for NLP models, `),xs=a(le,"CODE",{});var S2=n(xs);bh=i(S2,"pixel_values"),S2.forEach(t),vh=i(le," for vision models and "),ks=a(le,"CODE",{});var W2=n(ks);yh=i(W2,"input_values"),W2.forEach(t),$h=i(le," for speech models)."),le.forEach(t),U2.forEach(t),ze.forEach(t),wh=l(j),fe=a(j,"DIV",{class:!0});var wn=n(fe);g(Io.$$.fragment,wn),Th=l(wn),Lo=a(wn,"P",{});var yc=n(Lo);xh=i(yc,`Upload the model file to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Ps=a(yc,"CODE",{});var X2=n(Ps);kh=i(X2,"repo_path_or_name"),X2.forEach(t),Ph=i(yc,"."),yc.forEach(t),Mh=l(wn),g(ht.$$.fragment,wn),wn.forEach(t),Eh=l(j),z=a(j,"DIV",{class:!0});var A=n(z);g(Uo.$$.fragment,A),Fh=l(A),Ms=a(A,"P",{});var V2=n(Ms);jh=i(V2,"Instantiate a pretrained pytorch model from a pre-trained model configuration."),V2.forEach(t),Dh=l(A),Ge=a(A,"P",{});var Tn=n(Ge);qh=i(Tn,"The model is set in evaluation mode by default using "),Es=a(Tn,"CODE",{});var G2=n(Es);zh=i(G2,"model.eval()"),G2.forEach(t),Ah=i(Tn,` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Fs=a(Tn,"CODE",{});var R2=n(Fs);Ch=i(R2,"model.train()"),R2.forEach(t),Ih=i(Tn,"."),Tn.forEach(t),Lh=l(A),Oo=a(A,"P",{});var $c=n(Oo);Uh=i($c,"The warning "),js=a($c,"EM",{});var H2=n(js);Oh=i(H2,"Weights from XXX not initialized from pretrained model"),H2.forEach(t),Nh=i($c,` means that the weights of XXX do not come pretrained with the rest of the model. 
It is up to you to train those weights with a downstream fine-tuning task.`),$c.forEach(t),Bh=l(A),No=a(A,"P",{});var wc=n(No);Sh=i(wc,"The warning "),Ds=a(wc,"EM",{});var Y2=n(Ds);Wh=i(Y2,"Weights from XXX not used in YYY"),Y2.forEach(t),Xh=i(wc,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),wc.forEach(t),Vh=l(A),g(ft.$$.fragment,A),Gh=l(A),g(ut.$$.fragment,A),Rh=l(A),g(gt.$$.fragment,A),Hh=l(A),qs=a(A,"UL",{});var J2=n(qs);rn=a(J2,"LI",{});var r2=n(rn);zs=a(r2,"CODE",{});var K2=n(zs);Yh=i(K2,"low_cpu_mem_usage"),K2.forEach(t),Jh=i(r2," algorithm:"),r2.forEach(t),J2.forEach(t),Kh=l(A),As=a(A,"P",{});var Z2=n(As);Zh=i(Z2,"This is an experimental function that loads the model using ~1x model size CPU memory"),Z2.forEach(t),Qh=l(A),Cs=a(A,"P",{});var Q2=n(Cs);ef=i(Q2,"Here is how it works:"),Q2.forEach(t),tf=l(A),R=a(A,"OL",{});var Ae=n(R);Is=a(Ae,"LI",{});var e$=n(Is);of=i(e$,"save which state_dict keys we have"),e$.forEach(t),rf=l(Ae),Ls=a(Ae,"LI",{});var t$=n(Ls);af=i(t$,"drop state_dict before the model is created, since the latter takes 1x model size CPU memory"),t$.forEach(t),nf=l(Ae),Us=a(Ae,"LI",{});var o$=n(Us);sf=i(o$,`after the model has been instantiated switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict`),o$.forEach(t),df=l(Ae),Os=a(Ae,"LI",{});var r$=n(Os);lf=i(r$,"load state_dict 2nd time"),r$.forEach(t),cf=l(Ae),Ns=a(Ae,"LI",{});var a$=n(Ns);mf=i(a$,"replace the params/buffers from the state_dict"),a$.forEach(t),Ae.forEach(t),pf=l(A),Bs=a(A,"P",{});var n$=n(Bs);hf=i(n$,"Currently, it can\u2019t handle deepspeed ZeRO stage 3 and ignores loading errors"),n$.forEach(t),A.forEach(t),ff=l(j),_t=a(j,"DIV",{class:!0});var Tc=n(_t);g(Bo.$$.fragment,Tc),uf=l(Tc),Ss=a(Tc,"P",{});var s$=n(Ss);gf=i(s$,"Returns the model\u2019s input embeddings."),s$.forEach(t),Tc.forEach(t),_f=l(j),bt=a(j,"DIV",{class:!0});var xc=n(bt);g(So.$$.fragment,xc),bf=l(xc),an=a(xc,"P",{});var a2=n(an);vf=i(a2,`Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. 
Solution inspired from the PyTorch discussions: `),Wo=a(a2,"A",{href:!0,rel:!0});var i$=n(Wo);yf=i(i$,"https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2"),i$.forEach(t),a2.forEach(t),xc.forEach(t),$f=l(j),vt=a(j,"DIV",{class:!0});var kc=n(vt);g(Xo.$$.fragment,kc),wf=l(kc),Ws=a(kc,"P",{});var d$=n(Ws);Tf=i(d$,"Returns the model\u2019s output embeddings."),d$.forEach(t),kc.forEach(t),xf=l(j),ue=a(j,"DIV",{class:!0});var xn=n(ue);g(Vo.$$.fragment,xn),kf=l(xn),Xs=a(xn,"P",{});var l$=n(Xs);Pf=i(l$,"Deactivates gradient checkpointing for the current model."),l$.forEach(t),Mf=l(xn),Vs=a(xn,"P",{});var c$=n(Vs);Ef=i(c$,`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),c$.forEach(t),xn.forEach(t),Ff=l(j),ge=a(j,"DIV",{class:!0});var kn=n(ge);g(Go.$$.fragment,kn),jf=l(kn),Gs=a(kn,"P",{});var m$=n(Gs);Df=i(m$,"Activates gradient checkpointing for the current model."),m$.forEach(t),qf=l(kn),Rs=a(kn,"P",{});var p$=n(Rs);zf=i(p$,`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),p$.forEach(t),kn.forEach(t),Af=l(j),yt=a(j,"DIV",{class:!0});var Pc=n(yt);g(Ro.$$.fragment,Pc),Cf=l(Pc),Hs=a(Pc,"P",{});var h$=n(Hs);If=i(h$,"If needed prunes and maybe initializes weights."),h$.forEach(t),Pc.forEach(t),Lf=l(j),$t=a(j,"DIV",{class:!0});var Mc=n($t);g(Ho.$$.fragment,Mc),Uf=l(Mc),Ys=a(Mc,"P",{});var f$=n(Ys);Of=i(f$,`A method executed at the end of each Transformer model initialization, to execute code that needs the model\u2019s modules properly initialized (such as weight initialization).`),f$.forEach(t),Mc.forEach(t),Nf=l(j),wt=a(j,"DIV",{class:!0});var Ec=n(wt);g(Yo.$$.fragment,Ec),Bf=l(Ec),Js=a(Ec,"P",{});var u$=n(Js);Sf=i(u$,"Prunes heads of the base model."),u$.forEach(t),Ec.forEach(t),Wf=l(j),_e=a(j,"DIV",{class:!0});var Pn=n(_e);g(Jo.$$.fragment,Pn),Xf=l(Pn),Ks=a(Pn,"P",{});var g$=n(Ks);Vf=i(g$,`Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.`),g$.forEach(t),Gf=l(Pn),g(Tt.$$.fragment,Pn),Pn.forEach(t),Rf=l(j),be=a(j,"DIV",{class:!0});var Mn=n(be);g(Ko.$$.fragment,Mn),Hf=l(Mn),Zo=a(Mn,"P",{});var Fc=n(Zo);Yf=i(Fc,"Resizes input token embeddings matrix of the model if "),Zs=a(Fc,"CODE",{});var _$=n(Zs);Jf=i(_$,"new_num_tokens != config.vocab_size"),_$.forEach(t),Kf=i(Fc,"."),Fc.forEach(t),Zf=l(Mn),Qo=a(Mn,"P",{});var jc=n(Qo);Qf=i(jc,"Takes care of tying weights embeddings afterwards if the model class has a "),Qs=a(jc,"CODE",{});var b$=n(Qs);eu=i(b$,"tie_weights()"),b$.forEach(t),tu=i(jc," method."),jc.forEach(t),Mn.forEach(t),ou=l(j),xt=a(j,"DIV",{class:!0});var Dc=n(xt);g(er.$$.fragment,Dc),ru=l(Dc),tr=a(Dc,"P",{});var qc=n(tr);au=i(qc,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),nn=a(qc,"A",{href:!0});var v$=n(nn);nu=i(v$,"from_pretrained()"),v$.forEach(t),su=i(qc," class method."),qc.forEach(t),Dc.forEach(t),iu=l(j),kt=a(j,"DIV",{class:!0});var zc=n(kt);g(or.$$.fragment,zc),du=l(zc),ei=a(zc,"P",{});var y$=n(ei);lu=i(y$,"Set model\u2019s input embeddings."),y$.forEach(t),zc.forEach(t),cu=l(j),ve=a(j,"DIV",{class:!0});var En=n(ve);g(rr.$$.fragment,En),mu=l(En),ti=a(En,"P",{});var $$=n(ti);pu=i($$,"Tie the weights between the input embeddings and the output embeddings."),$$.forEach(t),hu=l(En),ar=a(En,"P",{});var Ac=n(ar);fu=i(Ac,"If the "),oi=a(Ac,"CODE",{});var w$=n(oi);uu=i(w$,"torchscript"),w$.forEach(t),gu=i(Ac,` flag is set in the configuration, can\u2019t handle parameter sharing so we are cloning the weights instead.`),Ac.forEach(t),En.forEach(t),j.forEach(t),Il=l(o),sn=a(o,"A",{id:!0}),n(sn).forEach(t),Ll=l(o),Re=a(o,"H3",{class:!0});var Cc=n(Re);Pt=a(Cc,"A",{id:!0,class:!0,href:!0});var T$=n(Pt);ri=a(T$,"SPAN",{});var x$=n(ri);g(nr.$$.fragment,x$),x$.forEach(t),T$.forEach(t),_u=l(Cc),ai=a(Cc,"SPAN",{});var k$=n(ai);bu=i(k$,"Large model loading"),k$.forEach(t),Cc.forEach(t),Ul=l(o),ye=a(o,"P",{});var Fn=n(ye);vu=i(Fn,"In Transformers 4.20.0, the "),dn=a(Fn,"A",{href:!0});var P$=n(dn);yu=i(P$,"from_pretrained()"),P$.forEach(t),$u=i(Fn," method has been reworked to accommodate large models using "),sr=a(Fn,"A",{href:!0,rel:!0});var M$=n(sr);wu=i(M$,"Accelerate"),M$.forEach(t),Tu=i(Fn,". This requires Accelerate >= 0.9.0 and PyTorch >= 1.9.0. Instead of creating the full model, then loading the pretrained weights inside it (which takes twice the size of the model in RAM, one for the randomly initialized model, one for the weights), there is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."),Fn.forEach(t),Ol=l(o),Mt=a(o,"P",{});var Ic=n(Mt);xu=i(Ic,"This option can be activated with "),ni=a(Ic,"CODE",{});var E$=n(ni);ku=i(E$,"low_cpu_mem_usage=True"),E$.forEach(t),Pu=i(Ic,". The model is first created on the Meta device (with empty weights) and the state dict is then loaded inside it (shard by shard in the case of a sharded checkpoint). This way the maximum RAM used is the full size of the model only."),Ic.forEach(t),Nl=l(o),g(ir.$$.fragment,o),Bl=l(o),Et=a(o,"P",{});var Lc=n(Et);Mu=i(Lc,"Moreover, you can directly place the model on different devices if it doesn\u2019t fully fit in RAM (only works for inference for now). 
With "),si=a(Lc,"CODE",{});var F$=n(si);Eu=i(F$,'device_map="auto"'),F$.forEach(t),Fu=i(Lc,", Accelerate will determine where to put each layer to maximize the use of your fastest devices (GPUs) and offload the rest on the CPU, or even the hard drive if you don\u2019t have enough GPU RAM (or CPU RAM). Even if the model is split across several devices, it will run as you would normally expect."),Lc.forEach(t),Sl=l(o),Q=a(o,"P",{});var Eo=n(Q);ju=i(Eo,"When passing a "),ii=a(Eo,"CODE",{});var j$=n(ii);Du=i(j$,"device_map"),j$.forEach(t),qu=i(Eo,", "),di=a(Eo,"CODE",{});var D$=n(di);zu=i(D$,"low_cpu_mem_usage"),D$.forEach(t),Au=i(Eo," is automatically set to "),li=a(Eo,"CODE",{});var q$=n(li);Cu=i(q$,"True"),q$.forEach(t),Iu=i(Eo,", so you don\u2019t need to specify it:"),Eo.forEach(t),Wl=l(o),g(dr.$$.fragment,o),Xl=l(o),Ft=a(o,"P",{});var Uc=n(Ft);Lu=i(Uc,"You can inspect how the model was split across devices by looking at its "),ci=a(Uc,"CODE",{});var z$=n(ci);Uu=i(z$,"hf_device_map"),z$.forEach(t),Ou=i(Uc," attribute:"),Uc.forEach(t),Vl=l(o),g(lr.$$.fragment,o),Gl=l(o),g(cr.$$.fragment,o),Rl=l(o),ln=a(o,"P",{});var A$=n(ln);Nu=i(A$,"You can also write your own device map following the same format (a dictionary layer name to device). It should map all parameters of the model to a given device, but you don\u2019t have to detail where all the submosules of one layer go if that layer is entirely on the same device. For instance, the following device map would work properly for T0pp (as long as you have the GPU memory):"),A$.forEach(t),Hl=l(o),g(mr.$$.fragment,o),Yl=l(o),jt=a(o,"P",{});var Oc=n(jt);Bu=i(Oc,"Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like "),mi=a(Oc,"CODE",{});var C$=n(mi);Su=i(C$,"torch.float16"),C$.forEach(t),Wu=i(Oc,") or use direct quantization techniques as described below."),Oc.forEach(t),Jl=l(o),He=a(o,"H3",{class:!0});var Nc=n(He);Dt=a(Nc,"A",{id:!0,class:!0,href:!0});var I$=n(Dt);pi=a(I$,"SPAN",{});var L$=n(pi);g(pr.$$.fragment,L$),L$.forEach(t),I$.forEach(t),Xu=l(Nc),hi=a(Nc,"SPAN",{});var U$=n(hi);Vu=i(U$,"Model Instantiation dtype"),U$.forEach(t),Nc.forEach(t),Kl=l(o),ee=a(o,"P",{});var Fo=n(ee);Gu=i(Fo,"Under Pytorch a model normally gets instantiated with "),fi=a(Fo,"CODE",{});var O$=n(fi);Ru=i(O$,"torch.float32"),O$.forEach(t),Hu=i(Fo,` format. This can be an issue if one tries to load a model whose weights are in fp16, since it\u2019d require twice as much memory. 
To overcome this limitation, you can either explicitly pass the desired `),ui=a(Fo,"CODE",{});var N$=n(ui);Yu=i(N$,"dtype"),N$.forEach(t),Ju=i(Fo," using "),gi=a(Fo,"CODE",{});var B$=n(gi);Ku=i(B$,"torch_dtype"),B$.forEach(t),Zu=i(Fo," argument:"),Fo.forEach(t),Zl=l(o),g(hr.$$.fragment,o),Ql=l(o),$e=a(o,"P",{});var jn=n($e);Qu=i(jn,"or, if you want the model to always load in the most optimal memory pattern, you can use the special value "),_i=a(jn,"CODE",{});var S$=n(_i);eg=i(S$,'"auto"'),S$.forEach(t),tg=i(jn,`, and then `),bi=a(jn,"CODE",{});var W$=n(bi);og=i(W$,"dtype"),W$.forEach(t),rg=i(jn," will be automatically derived from the model\u2019s weights:"),jn.forEach(t),ec=l(o),g(fr.$$.fragment,o),tc=l(o),qt=a(o,"P",{});var Bc=n(qt);ag=i(Bc,"Models instantiated from scratch can also be told which "),vi=a(Bc,"CODE",{});var X$=n(vi);ng=i(X$,"dtype"),X$.forEach(t),sg=i(Bc," to use with:"),Bc.forEach(t),oc=l(o),g(ur.$$.fragment,o),rc=l(o),cn=a(o,"P",{});var V$=n(cn);ig=i(V$,"Due to Pytorch design, this functionality is only available for floating dtypes."),V$.forEach(t),ac=l(o),Ye=a(o,"H2",{class:!0});var Sc=n(Ye);zt=a(Sc,"A",{id:!0,class:!0,href:!0});var G$=n(zt);yi=a(G$,"SPAN",{});var R$=n(yi);g(gr.$$.fragment,R$),R$.forEach(t),G$.forEach(t),dg=l(Sc),$i=a(Sc,"SPAN",{});var H$=n($i);lg=i(H$,"ModuleUtilsMixin"),H$.forEach(t),Sc.forEach(t),nc=l(o),I=a(o,"DIV",{class:!0});var L=n(I);g(_r.$$.fragment,L),cg=l(L),br=a(L,"P",{});var Wc=n(br);mg=i(Wc,"A few utilities for "),wi=a(Wc,"CODE",{});var Y$=n(wi);pg=i(Y$,"torch.nn.Modules"),Y$.forEach(t),hg=i(Wc,", to be used as a mixin."),Wc.forEach(t),fg=l(L),we=a(L,"DIV",{class:!0});var Dn=n(we);g(vr.$$.fragment,Dn),ug=l(Dn),Ti=a(Dn,"P",{});var J$=n(Ti);gg=i(J$,"Add a memory hook before and after each sub-module forward pass to record increase in memory consumption."),J$.forEach(t),_g=l(Dn),Je=a(Dn,"P",{});var qn=n(Je);bg=i(qn,"Increase in memory consumption is stored in a "),xi=a(qn,"CODE",{});var K$=n(xi);vg=i(K$,"mem_rss_diff"),K$.forEach(t),yg=i(qn,` attribute for each module and can be reset to zero with `),ki=a(qn,"CODE",{});var Z$=n(ki);$g=i(Z$,"model.reset_memory_hooks_state()"),Z$.forEach(t),wg=i(qn,"."),qn.forEach(t),Dn.forEach(t),Tg=l(L),At=a(L,"DIV",{class:!0});var Xc=n(At);g(yr.$$.fragment,Xc),xg=l(Xc),Pi=a(Xc,"P",{});var Q$=n(Pi);kg=i(Q$,"Helper function to estimate the total number of tokens from the model inputs."),Q$.forEach(t),Xc.forEach(t),Pg=l(L),Ct=a(L,"DIV",{class:!0});var Vc=n(Ct);g($r.$$.fragment,Vc),Mg=l(Vc),Ke=a(Vc,"P",{});var zn=n(Ke);Eg=i(zn,`Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `),Mi=a(zn,"CODE",{});var ew=n(Mi);Fg=i(ew,"12 * d_model << sequence_length"),ew.forEach(t),jg=i(zn,") as laid out in "),wr=a(zn,"A",{href:!0,rel:!0});var tw=n(wr);Dg=i(tw,`this paper`),tw.forEach(t),qg=i(zn,` section 2.1. Should be overridden for transformers with parameter re-use e.g. 
Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.`),zn.forEach(t),Vc.forEach(t),zg=l(L),It=a(L,"DIV",{class:!0});var Gc=n(It);g(Tr.$$.fragment,Gc),Ag=l(Gc),Ei=a(Gc,"P",{});var ow=n(Ei);Cg=i(ow,"Makes broadcastable attention and causal masks so that future and masked tokens are ignored."),ow.forEach(t),Gc.forEach(t),Ig=l(L),Lt=a(L,"DIV",{class:!0});var Rc=n(Lt);g(xr.$$.fragment,Rc),Lg=l(Rc),Fi=a(Rc,"P",{});var rw=n(Fi);Ug=i(rw,"Prepare the head mask if needed."),rw.forEach(t),Rc.forEach(t),Og=l(L),Ut=a(L,"DIV",{class:!0});var Hc=n(Ut);g(kr.$$.fragment,Hc),Ng=l(Hc),ji=a(Hc,"P",{});var aw=n(ji);Bg=i(aw,"Invert an attention mask (e.g., switches 0. and 1.)."),aw.forEach(t),Hc.forEach(t),Sg=l(L),Ot=a(L,"DIV",{class:!0});var Yc=n(Ot);g(Pr.$$.fragment,Yc),Wg=l(Yc),Di=a(Yc,"P",{});var nw=n(Di);Xg=i(nw,"Get number of (optionally, trainable or non-embeddings) parameters in the module."),nw.forEach(t),Yc.forEach(t),Vg=l(L),Nt=a(L,"DIV",{class:!0});var Jc=n(Nt);g(Mr.$$.fragment,Jc),Gg=l(Jc),Ze=a(Jc,"P",{});var An=n(Ze);Rg=i(An,"Reset the "),qi=a(An,"CODE",{});var sw=n(qi);Hg=i(sw,"mem_rss_diff"),sw.forEach(t),Yg=i(An," attribute of each module (see "),mn=a(An,"A",{href:!0});var iw=n(mn);Jg=i(iw,"add_memory_hooks()"),iw.forEach(t),Kg=i(An,")."),An.forEach(t),Jc.forEach(t),L.forEach(t),sc=l(o),Qe=a(o,"H2",{class:!0});var Kc=n(Qe);Bt=a(Kc,"A",{id:!0,class:!0,href:!0});var dw=n(Bt);zi=a(dw,"SPAN",{});var lw=n(zi);g(Er.$$.fragment,lw),lw.forEach(t),dw.forEach(t),Zg=l(Kc),Ai=a(Kc,"SPAN",{});var cw=n(Ai);Qg=i(cw,"TFPreTrainedModel"),cw.forEach(t),Kc.forEach(t),ic=l(o),P=a(o,"DIV",{class:!0});var E=n(P);g(Fr.$$.fragment,E),e_=l(E),Ci=a(E,"P",{});var mw=n(Ci);t_=i(mw,"Base class for all TF models."),mw.forEach(t),o_=l(E),pn=a(E,"P",{});var n2=n(pn);hn=a(n2,"A",{href:!0});var pw=n(hn);r_=i(pw,"TFPreTrainedModel"),pw.forEach(t),a_=i(n2,` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),n2.forEach(t),n_=l(E),jr=a(E,"UL",{});var Zc=n(jr);Ii=a(Zc,"LI",{});var hw=n(Ii);s_=i(hw,"resize the input embeddings,"),hw.forEach(t),i_=l(Zc),Li=a(Zc,"LI",{});var fw=n(Li);d_=i(fw,"prune heads in the self-attention heads."),fw.forEach(t),Zc.forEach(t),l_=l(E),Ui=a(E,"P",{});var uw=n(Ui);c_=i(uw,"Class attributes (overridden by derived classes):"),uw.forEach(t),m_=l(E),et=a(E,"UL",{});var Cn=n(et);Te=a(Cn,"LI",{});var Ba=n(Te);Oi=a(Ba,"STRONG",{});var gw=n(Oi);p_=i(gw,"config_class"),gw.forEach(t),h_=i(Ba," ("),fn=a(Ba,"A",{href:!0});var _w=n(fn);f_=i(_w,"PretrainedConfig"),_w.forEach(t),u_=i(Ba,") \u2014 A subclass of "),un=a(Ba,"A",{href:!0});var bw=n(un);g_=i(bw,"PretrainedConfig"),bw.forEach(t),__=i(Ba,` to use as configuration class for this model architecture.`),Ba.forEach(t),b_=l(Cn),St=a(Cn,"LI",{});var El=n(St);Ni=a(El,"STRONG",{});var vw=n(Ni);v_=i(vw,"base_model_prefix"),vw.forEach(t),y_=i(El," ("),Bi=a(El,"CODE",{});var yw=n(Bi);$_=i(yw,"str"),yw.forEach(t),w_=i(El,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),El.forEach(t),T_=l(Cn),W=a(Cn,"LI",{});var ce=n(W);Si=a(ce,"STRONG",{});var $w=n(Si);x_=i($w,"main_input_name"),$w.forEach(t),k_=i(ce," ("),Wi=a(ce,"CODE",{});var ww=n(Wi);P_=i(ww,"str"),ww.forEach(t),M_=i(ce,") \u2014 The name of the principal input to the model (often "),Xi=a(ce,"CODE",{});var 
Tw=n(Xi);E_=i(Tw,"input_ids"),Tw.forEach(t),F_=i(ce,` for NLP models, `),Vi=a(ce,"CODE",{});var xw=n(Vi);j_=i(xw,"pixel_values"),xw.forEach(t),D_=i(ce," for vision models and "),Gi=a(ce,"CODE",{});var kw=n(Gi);q_=i(kw,"input_values"),kw.forEach(t),z_=i(ce," for speech models)."),ce.forEach(t),Cn.forEach(t),A_=l(E),xe=a(E,"DIV",{class:!0});var In=n(xe);g(Dr.$$.fragment,In),C_=l(In),qr=a(In,"P",{});var Qc=n(qr);I_=i(Qc,"Upload the model files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in "),Ri=a(Qc,"CODE",{});var Pw=n(Ri);L_=i(Pw,"repo_path_or_name"),Pw.forEach(t),U_=i(Qc,"."),Qc.forEach(t),O_=l(In),g(Wt.$$.fragment,In),In.forEach(t),N_=l(E),Xt=a(E,"DIV",{class:!0});var em=n(Xt);g(zr.$$.fragment,em),B_=l(em),Hi=a(em,"P",{});var Mw=n(Hi);S_=i(Mw,`This is a thin wrapper that sets the model\u2019s loss output head as the loss if the user does not specify a loss function themselves.`),Mw.forEach(t),em.forEach(t),W_=l(E),Vt=a(E,"DIV",{class:!0});var tm=n(Vt);g(Ar.$$.fragment,tm),X_=l(tm),Cr=a(tm,"P",{});var om=n(Cr);V_=i(om,"Creates a draft of a model card using the information available to the "),Yi=a(om,"CODE",{});var Ew=n(Yi);G_=i(Ew,"Trainer"),Ew.forEach(t),R_=i(om,"."),om.forEach(t),tm.forEach(t),H_=l(E),N=a(E,"DIV",{class:!0});var re=n(N);g(Ir.$$.fragment,re),Y_=l(re),Ji=a(re,"P",{});var Fw=n(Ji);J_=i(Fw,"Instantiate a pretrained TF 2.0 model from a pre-trained model configuration."),Fw.forEach(t),K_=l(re),Lr=a(re,"P",{});var rm=n(Lr);Z_=i(rm,"The warning "),Ki=a(rm,"EM",{});var jw=n(Ki);Q_=i(jw,"Weights from XXX not initialized from pretrained model"),jw.forEach(t),eb=i(rm,` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),rm.forEach(t),tb=l(re),Ur=a(re,"P",{});var am=n(Ur);ob=i(am,"The warning "),Zi=a(am,"EM",{});var Dw=n(Zi);rb=i(Dw,"Weights from XXX not used in YYY"),Dw.forEach(t),ab=i(am,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),am.forEach(t),nb=l(re),g(Gt.$$.fragment,re),sb=l(re),g(Rt.$$.fragment,re),re.forEach(t),ib=l(E),Ht=a(E,"DIV",{class:!0});var nm=n(Ht);g(Or.$$.fragment,nm),db=l(nm),Qi=a(nm,"P",{});var qw=n(Qi);lb=i(qw,"Dict of bias attached to an LM head. The key represents the name of the bias attribute."),qw.forEach(t),nm.forEach(t),cb=l(E),Yt=a(E,"DIV",{class:!0});var sm=n(Yt);g(Nr.$$.fragment,sm),mb=l(sm),ed=a(sm,"P",{});var zw=n(ed);pb=i(zw,"Returns the model\u2019s input embeddings layer."),zw.forEach(t),sm.forEach(t),hb=l(E),Jt=a(E,"DIV",{class:!0});var im=n(Jt);g(Br.$$.fragment,im),fb=l(im),td=a(im,"P",{});var Aw=n(td);ub=i(Aw,"The LM Head layer. 
This method must be overwritten by all the models that have a lm head."),Aw.forEach(t),im.forEach(t),gb=l(E),Kt=a(E,"DIV",{class:!0});var dm=n(Kt);g(Sr.$$.fragment,dm),_b=l(dm),od=a(dm,"P",{});var Cw=n(od);bb=i(Cw,"Returns the model\u2019s output embeddings"),Cw.forEach(t),dm.forEach(t),vb=l(E),Zt=a(E,"DIV",{class:!0});var lm=n(Zt);g(Wr.$$.fragment,lm),yb=l(lm),rd=a(lm,"P",{});var Iw=n(rd);$b=i(Iw,`Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings`),Iw.forEach(t),lm.forEach(t),wb=l(E),Qt=a(E,"DIV",{class:!0});var cm=n(Qt);g(Xr.$$.fragment,cm),Tb=l(cm),ad=a(cm,"P",{});var Lw=n(ad);xb=i(Lw,"Get the concatenated _prefix name of the bias from the model name to the parent layer"),Lw.forEach(t),cm.forEach(t),kb=l(E),eo=a(E,"DIV",{class:!0});var mm=n(eo);g(Vr.$$.fragment,mm),Pb=l(mm),nd=a(mm,"P",{});var Uw=n(nd);Mb=i(Uw,`Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when the checkpoint was made.`),Uw.forEach(t),mm.forEach(t),Eb=l(E),to=a(E,"DIV",{class:!0});var pm=n(to);g(Gr.$$.fragment,pm),Fb=l(pm),H=a(pm,"P",{});var Ce=n(H);jb=i(Ce,"Wraps a HuggingFace "),Rr=a(Ce,"A",{href:!0,rel:!0});var Ow=n(Rr);Db=i(Ow,"Dataset"),Ow.forEach(t),qb=i(Ce," as a "),sd=a(Ce,"CODE",{});var Nw=n(sd);zb=i(Nw,"tf.data.Dataset"),Nw.forEach(t),Ab=i(Ce,` with collation and batching. This method is designed to create a \u201Cready-to-use\u201D dataset that can be passed directly to Keras methods like `),id=a(Ce,"CODE",{});var Bw=n(id);Cb=i(Bw,"fit()"),Bw.forEach(t),Ib=i(Ce,` without further modification. The method will drop columns from the dataset if they don\u2019t match input names for the model. If you want to specify the column names to return rather than using the names that match this model, we recommend using `),dd=a(Ce,"CODE",{});var Sw=n(dd);Lb=i(Sw,"Dataset.to_tf_dataset()"),Sw.forEach(t),Ub=i(Ce," instead."),Ce.forEach(t),pm.forEach(t),Ob=l(E),oo=a(E,"DIV",{class:!0});var hm=n(oo);g(Hr.$$.fragment,hm),Nb=l(hm),ld=a(hm,"P",{});var Ww=n(ld);Bb=i(Ww,"Prunes heads of the base model."),Ww.forEach(t),hm.forEach(t),Sb=l(E),ke=a(E,"DIV",{class:!0});var Ln=n(ke);g(Yr.$$.fragment,Ln),Wb=l(Ln),cd=a(Ln,"P",{});var Xw=n(cd);Xb=i(Xw,`Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Xw.forEach(t),Vb=l(Ln),g(ro.$$.fragment,Ln),Ln.forEach(t),Gb=l(E),Pe=a(E,"DIV",{class:!0});var Un=n(Pe);g(Jr.$$.fragment,Un),Rb=l(Un),Kr=a(Un,"P",{});var fm=n(Kr);Hb=i(fm,"Resizes input token embeddings matrix of the model if "),md=a(fm,"CODE",{});var Vw=n(md);Yb=i(Vw,"new_num_tokens != config.vocab_size"),Vw.forEach(t),Jb=i(fm,"."),fm.forEach(t),Kb=l(Un),Zr=a(Un,"P",{});var um=n(Zr);Zb=i(um,"Takes care of tying weights embeddings afterwards if the model class has a "),pd=a(um,"CODE",{});var Gw=n(pd);Qb=i(Gw,"tie_weights()"),Gw.forEach(t),ev=i(um," method."),um.forEach(t),Un.forEach(t),tv=l(E),ao=a(E,"DIV",{class:!0});var gm=n(ao);g(Qr.$$.fragment,gm),ov=l(gm),ea=a(gm,"P",{});var _m=n(ea);rv=i(_m,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),gn=a(_m,"A",{href:!0});var Rw=n(gn);av=i(Rw,"from_pretrained()"),Rw.forEach(t),nv=i(_m," class method."),_m.forEach(t),gm.forEach(t),sv=l(E),no=a(E,"DIV",{class:!0});var bm=n(no);g(ta.$$.fragment,bm),iv=l(bm),hd=a(bm,"P",{});var Hw=n(hd);dv=i(Hw,"Method used for serving the model."),Hw.forEach(t),bm.forEach(t),lv=l(E),so=a(E,"DIV",{class:!0});var vm=n(so);g(oa.$$.fragment,vm),cv=l(vm),fd=a(vm,"P",{});var Yw=n(fd);mv=i(Yw,"Prepare the output of the saved model. Each model must implement this function."),Yw.forEach(t),vm.forEach(t),pv=l(E),io=a(E,"DIV",{class:!0});var ym=n(io);g(ra.$$.fragment,ym),hv=l(ym),ud=a(ym,"P",{});var Jw=n(ud);fv=i(Jw,"Set all the bias in the LM head."),Jw.forEach(t),ym.forEach(t),uv=l(E),lo=a(E,"DIV",{class:!0});var $m=n(lo);g(aa.$$.fragment,$m),gv=l($m),gd=a($m,"P",{});var Kw=n(gd);_v=i(Kw,"Set model\u2019s input embeddings"),Kw.forEach(t),$m.forEach(t),bv=l(E),co=a(E,"DIV",{class:!0});var wm=n(co);g(na.$$.fragment,wm),vv=l(wm),_d=a(wm,"P",{});var Zw=n(_d);yv=i(Zw,"Set model\u2019s output embeddings"),Zw.forEach(t),wm.forEach(t),$v=l(E),mo=a(E,"DIV",{class:!0});var Tm=n(mo);g(sa.$$.fragment,Tm),wv=l(Tm),ia=a(Tm,"P",{});var xm=n(ia);Tv=i(xm,"A modification of Keras\u2019s default "),bd=a(xm,"CODE",{});var Qw=n(bd);xv=i(Qw,"train_step"),Qw.forEach(t),kv=i(xm,` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.`),xm.forEach(t),Tm.forEach(t),Pv=l(E),po=a(E,"DIV",{class:!0});var km=n(po);g(da.$$.fragment,km),Mv=l(km),la=a(km,"P",{});var Pm=n(la);Ev=i(Pm,"A modification of Keras\u2019s default "),vd=a(Pm,"CODE",{});var eT=n(vd);Fv=i(eT,"train_step"),eT.forEach(t),jv=i(Pm,` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. 
It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.`),Pm.forEach(t),km.forEach(t),E.forEach(t),dc=l(o),tt=a(o,"H2",{class:!0});var Mm=n(tt);ho=a(Mm,"A",{id:!0,class:!0,href:!0});var tT=n(ho);yd=a(tT,"SPAN",{});var oT=n(yd);g(ca.$$.fragment,oT),oT.forEach(t),tT.forEach(t),Dv=l(Mm),$d=a(Mm,"SPAN",{});var rT=n($d);qv=i(rT,"TFModelUtilsMixin"),rT.forEach(t),Mm.forEach(t),lc=l(o),ne=a(o,"DIV",{class:!0});var On=n(ne);g(ma.$$.fragment,On),zv=l(On),pa=a(On,"P",{});var Em=n(pa);Av=i(Em,"A few utilities for "),wd=a(Em,"CODE",{});var aT=n(wd);Cv=i(aT,"tf.keras.Model"),aT.forEach(t),Iv=i(Em,", to be used as a mixin."),Em.forEach(t),Lv=l(On),fo=a(On,"DIV",{class:!0});var Fm=n(fo);g(ha.$$.fragment,Fm),Uv=l(Fm),Td=a(Fm,"P",{});var nT=n(Td);Ov=i(nT,"Get the number of (optionally, trainable) parameters in the model."),nT.forEach(t),Fm.forEach(t),On.forEach(t),cc=l(o),ot=a(o,"H2",{class:!0});var jm=n(ot);uo=a(jm,"A",{id:!0,class:!0,href:!0});var sT=n(uo);xd=a(sT,"SPAN",{});var iT=n(xd);g(fa.$$.fragment,iT),iT.forEach(t),sT.forEach(t),Nv=l(jm),kd=a(jm,"SPAN",{});var dT=n(kd);Bv=i(dT,"FlaxPreTrainedModel"),dT.forEach(t),jm.forEach(t),mc=l(o),q=a(o,"DIV",{class:!0});var C=n(q);g(ua.$$.fragment,C),Sv=l(C),Pd=a(C,"P",{});var lT=n(Pd);Wv=i(lT,"Base class for all models."),lT.forEach(t),Xv=l(C),_n=a(C,"P",{});var s2=n(_n);bn=a(s2,"A",{href:!0});var cT=n(bn);Vv=i(cT,"FlaxPreTrainedModel"),cT.forEach(t),Gv=i(s2,` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.`),s2.forEach(t),Rv=l(C),Md=a(C,"P",{});var mT=n(Md);Hv=i(mT,"Class attributes (overridden by derived classes):"),mT.forEach(t),Yv=l(C),rt=a(C,"UL",{});var Nn=n(rt);Me=a(Nn,"LI",{});var Sa=n(Me);Ed=a(Sa,"STRONG",{});var pT=n(Ed);Jv=i(pT,"config_class"),pT.forEach(t),Kv=i(Sa," ("),vn=a(Sa,"A",{href:!0});var hT=n(vn);Zv=i(hT,"PretrainedConfig"),hT.forEach(t),Qv=i(Sa,") \u2014 A subclass of "),yn=a(Sa,"A",{href:!0});var fT=n(yn);ey=i(fT,"PretrainedConfig"),fT.forEach(t),ty=i(Sa,` to use as configuration class for this model architecture.`),Sa.forEach(t),oy=l(Nn),go=a(Nn,"LI",{});var Fl=n(go);Fd=a(Fl,"STRONG",{});var uT=n(Fd);ry=i(uT,"base_model_prefix"),uT.forEach(t),ay=i(Fl," ("),jd=a(Fl,"CODE",{});var gT=n(jd);ny=i(gT,"str"),gT.forEach(t),sy=i(Fl,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),Fl.forEach(t),iy=l(Nn),X=a(Nn,"LI",{});var me=n(X);Dd=a(me,"STRONG",{});var _T=n(Dd);dy=i(_T,"main_input_name"),_T.forEach(t),ly=i(me," ("),qd=a(me,"CODE",{});var bT=n(qd);cy=i(bT,"str"),bT.forEach(t),my=i(me,") \u2014 The name of the principal input to the model (often "),zd=a(me,"CODE",{});var vT=n(zd);py=i(vT,"input_ids"),vT.forEach(t),hy=i(me,` for NLP models, `),Ad=a(me,"CODE",{});var yT=n(Ad);fy=i(yT,"pixel_values"),yT.forEach(t),uy=i(me," for vision models and "),Cd=a(me,"CODE",{});var $T=n(Cd);gy=i($T,"input_values"),$T.forEach(t),_y=i(me," for speech models)."),me.forEach(t),Nn.forEach(t),by=l(C),Ee=a(C,"DIV",{class:!0});var Bn=n(Ee);g(ga.$$.fragment,Bn),vy=l(Bn),_a=a(Bn,"P",{});var Dm=n(_a);yy=i(Dm,`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Id=a(Dm,"CODE",{});var wT=n(Id);$y=i(wT,"repo_path_or_name"),wT.forEach(t),wy=i(Dm,"."),Dm.forEach(t),Ty=l(Bn),g(_o.$$.fragment,Bn),Bn.forEach(t),xy=l(C),V=a(C,"DIV",{class:!0});var 
Ie=n(V);g(ba.$$.fragment,Ie),ky=l(Ie),Ld=a(Ie,"P",{});var TT=n(Ld);Py=i(TT,"Instantiate a pretrained flax model from a pre-trained model configuration."),TT.forEach(t),My=l(Ie),va=a(Ie,"P",{});var qm=n(va);Ey=i(qm,"The warning "),Ud=a(qm,"EM",{});var xT=n(Ud);Fy=i(xT,"Weights from XXX not initialized from pretrained model"),xT.forEach(t),jy=i(qm,` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),qm.forEach(t),Dy=l(Ie),ya=a(Ie,"P",{});var zm=n(ya);qy=i(zm,"The warning "),Od=a(zm,"EM",{});var kT=n(Od);zy=i(kT,"Weights from XXX not used in YYY"),kT.forEach(t),Ay=i(zm,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),zm.forEach(t),Cy=l(Ie),g(bo.$$.fragment,Ie),Ie.forEach(t),Iy=l(C),Fe=a(C,"DIV",{class:!0});var Sn=n(Fe);g($a.$$.fragment,Sn),Ly=l(Sn),wa=a(Sn,"P",{});var Am=n(wa);Uy=i(Am,"This is the same as "),Nd=a(Am,"CODE",{});var PT=n(Nd);Oy=i(PT,"flax.serialization.from_bytes"),PT.forEach(t),Ny=i(Am,` (https:lax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.`),Am.forEach(t),By=l(Sn),Bd=a(Sn,"P",{});var MT=n(Bd);Sy=i(MT,`This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.`),MT.forEach(t),Sn.forEach(t),Wy=l(C),je=a(C,"DIV",{class:!0});var Wn=n(je);g(Ta.$$.fragment,Wn),Xy=l(Wn),Sd=a(Wn,"P",{});var ET=n(Sd);Vy=i(ET,`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),ET.forEach(t),Gy=l(Wn),g(vo.$$.fragment,Wn),Wn.forEach(t),Ry=l(C),yo=a(C,"DIV",{class:!0});var Cm=n(yo);g(xa.$$.fragment,Cm),Hy=l(Cm),ka=a(Cm,"P",{});var Im=n(ka);Yy=i(Im,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Wd=a(Im,"CODE",{});var FT=n(Wd);Jy=i(FT,"[from_pretrained()](/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)"),FT.forEach(t),Ky=i(Im," class method"),Im.forEach(t),Cm.forEach(t),Zy=l(C),te=a(C,"DIV",{class:!0});var jo=n(te);g(Pa.$$.fragment,jo),Qy=l(jo),Y=a(jo,"P",{});var Le=n(Y);e1=i(Le,"Cast the floating-point "),Xd=a(Le,"CODE",{});var jT=n(Xd);t1=i(jT,"params"),jT.forEach(t),o1=i(Le," to "),Vd=a(Le,"CODE",{});var DT=n(Vd);r1=i(DT,"jax.numpy.bfloat16"),DT.forEach(t),a1=i(Le,". This returns a new "),Gd=a(Le,"CODE",{});var qT=n(Gd);n1=i(qT,"params"),qT.forEach(t),s1=i(Le,` tree and does not cast the `),Rd=a(Le,"CODE",{});var zT=n(Rd);i1=i(zT,"params"),zT.forEach(t),d1=i(Le," in place."),Le.forEach(t),l1=l(jo),Hd=a(jo,"P",{});var AT=n(Hd);c1=i(AT,`This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.`),AT.forEach(t),m1=l(jo),g($o.$$.fragment,jo),jo.forEach(t),p1=l(C),oe=a(C,"DIV",{class:!0});var Do=n(oe);g(Ma.$$.fragment,Do),h1=l(Do),J=a(Do,"P",{});var Ue=n(J);f1=i(Ue,"Cast the floating-point "),Yd=a(Ue,"CODE",{});var CT=n(Yd);u1=i(CT,"parmas"),CT.forEach(t),g1=i(Ue," to "),Jd=a(Ue,"CODE",{});var IT=n(Jd);_1=i(IT,"jax.numpy.float16"),IT.forEach(t),b1=i(Ue,". 
This returns a new "),Kd=a(Ue,"CODE",{});var LT=n(Kd);v1=i(LT,"params"),LT.forEach(t),y1=i(Ue,` tree and does not cast the `),Zd=a(Ue,"CODE",{});var UT=n(Zd);$1=i(UT,"params"),UT.forEach(t),w1=i(Ue," in place."),Ue.forEach(t),T1=l(Do),Qd=a(Do,"P",{});var OT=n(Qd);x1=i(OT,`This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.`),OT.forEach(t),k1=l(Do),g(wo.$$.fragment,Do),Do.forEach(t),P1=l(C),De=a(C,"DIV",{class:!0});var Xn=n(De);g(Ea.$$.fragment,Xn),M1=l(Xn),K=a(Xn,"P",{});var Oe=n(K);E1=i(Oe,"Cast the floating-point "),el=a(Oe,"CODE",{});var NT=n(el);F1=i(NT,"parmas"),NT.forEach(t),j1=i(Oe," to "),tl=a(Oe,"CODE",{});var BT=n(tl);D1=i(BT,"jax.numpy.float32"),BT.forEach(t),q1=i(Oe,`. This method can be used to explicitly convert the model parameters to fp32 precision. This returns a new `),ol=a(Oe,"CODE",{});var ST=n(ol);z1=i(ST,"params"),ST.forEach(t),A1=i(Oe," tree and does not cast the "),rl=a(Oe,"CODE",{});var WT=n(rl);C1=i(WT,"params"),WT.forEach(t),I1=i(Oe," in place."),Oe.forEach(t),L1=l(Xn),g(To.$$.fragment,Xn),Xn.forEach(t),C.forEach(t),pc=l(o),at=a(o,"H2",{class:!0});var Lm=n(at);xo=a(Lm,"A",{id:!0,class:!0,href:!0});var XT=n(xo);al=a(XT,"SPAN",{});var VT=n(al);g(Fa.$$.fragment,VT),VT.forEach(t),XT.forEach(t),U1=l(Lm),nl=a(Lm,"SPAN",{});var GT=n(nl);O1=i(GT,"Pushing to the Hub"),GT.forEach(t),Lm.forEach(t),hc=l(o),se=a(o,"DIV",{class:!0});var Vn=n(se);g(ja.$$.fragment,Vn),N1=l(Vn),sl=a(Vn,"P",{});var RT=n(sl);B1=i(RT,"A Mixin containing the functionality to push a model or tokenizer to the hub."),RT.forEach(t),S1=l(Vn),qe=a(Vn,"DIV",{class:!0});var Gn=n(qe);g(Da.$$.fragment,Gn),W1=l(Gn),qa=a(Gn,"P",{});var Um=n(qa);X1=i(Um,`Upload the {object_files} to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),il=a(Um,"CODE",{});var HT=n(il);V1=i(HT,"repo_path_or_name"),HT.forEach(t),G1=i(Um,"."),Um.forEach(t),R1=l(Gn),g(ko.$$.fragment,Gn),Gn.forEach(t),Vn.forEach(t),fc=l(o),nt=a(o,"H2",{class:!0});var Om=n(nt);Po=a(Om,"A",{id:!0,class:!0,href:!0});var YT=n(Po);dl=a(YT,"SPAN",{});var JT=n(dl);g(za.$$.fragment,JT),JT.forEach(t),YT.forEach(t),H1=l(Om),ll=a(Om,"SPAN",{});var KT=n(ll);Y1=i(KT,"Sharded checkpoints"),KT.forEach(t),Om.forEach(t),uc=l(o),ie=a(o,"DIV",{class:!0});var Rn=n(ie);g(Aa.$$.fragment,Rn),J1=l(Rn),Ca=a(Rn,"P",{});var Nm=n(Ca);K1=i(Nm,`This is the same as `),Ia=a(Nm,"A",{href:!0,rel:!0});var ZT=n(Ia);cl=a(ZT,"CODE",{});var QT=n(cl);Z1=i(QT,"torch.nn.Module.load_state_dict"),QT.forEach(t),ZT.forEach(t),Q1=i(Nm,` but for a sharded checkpoint.`),Nm.forEach(t),e2=l(Rn),ml=a(Rn,"P",{});var ex=n(ml);t2=i(ex,`This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.`),ex.forEach(t),Rn.forEach(t),this.h()},h(){m(p,"name","hf:doc:metadata"),m(p,"content",JSON.stringify(Tx)),m(f,"id","models"),m(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(f,"href","#models"),m(w,"class","relative 
group"),m(Wa,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(Xa,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),m(Va,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),m(Ga,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(Ra,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),m(Ha,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin"),m(Ya,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin"),m(Ja,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin"),m(Ka,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin"),m(it,"id","transformers.PreTrainedModel"),m(it,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(it,"href","#transformers.PreTrainedModel"),m(Xe,"class","relative group"),m(Qa,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(en,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(tn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(on,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(_t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Wo,"href","https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2"),m(Wo,"rel","nofollow"),m(bt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(vt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(yt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m($t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(wt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(nn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(xt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(kt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),m(sn,"id","from_pretrained-torch-dtype"),m(Pt,"id","large-model-loading"),m(Pt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Pt,"href","#large-model-loading"),m(Re,"class","relative group"),m(dn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained"),m(sr,"href","https://huggingface.co/docs/accelerate/big_modeling"),m(sr,"rel","nofollow"),m(Dt,"id","model-instantiation-dtype"),m(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Dt,"href","#model-instantiation-dtype"),m(He,"class","relative group"),m(zt,"id","transformers.modeling_utils.ModuleUtilsMixin"),m(zt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(zt,"href","#transformers.modeling_utils.ModuleUtilsMixin"),m(Ye,"class","relative group"),m(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(At,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(wr,"href","https://arxiv.org/pdf/2001.08361.pdf"),m(wr,"rel","nofollow"),m(Ct,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(It,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Lt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ut,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ot,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(mn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"),m(Nt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Bt,"id","transformers.TFPreTrainedModel"),m(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Bt,"href","#transformers.TFPreTrainedModel"),m(Qe,"class","relative group"),m(hn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel"),m(fn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(un,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Xt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Vt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ht,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Yt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Jt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),m(Kt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Zt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Qt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(eo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Rr,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset"),m(Rr,"rel","nofollow"),m(to,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(oo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(gn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained"),m(ao,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(no,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(so,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(io,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(lo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(co,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(mo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(po,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ho,"id","transformers.modeling_tf_utils.TFModelUtilsMixin"),m(ho,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ho,"href","#transformers.modeling_tf_utils.TFModelUtilsMixin"),m(tt,"class","relative group"),m(fo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(uo,"id","transformers.FlaxPreTrainedModel"),m(uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(uo,"href","#transformers.FlaxPreTrainedModel"),m(ot,"class","relative group"),m(bn,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel"),m(vn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(yn,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),m(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(yo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"),m(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(De,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(xo,"id","transformers.utils.PushToHubMixin"),m(xo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(xo,"href","#transformers.utils.PushToHubMixin"),m(at,"class","relative group"),m(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Po,"id","transformers.modeling_utils.load_sharded_checkpoint"),m(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Po,"href","#transformers.modeling_utils.load_sharded_checkpoint"),m(nt,"class","relative group"),m(Ia,"href","https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict"),m(Ia,"rel","nofollow"),m(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(o,h){e(document.head,p),$(o,x,h),$(o,w,h),e(w,f),e(f,k),_(c,k,null),e(w,T),e(w,ae),e(ae,Bm),$(o,jl,h),$(o,Z,h),e(Z,Sm),e(Z,Wa),e(Wa,Wm),e(Z,Xm),e(Z,Xa),e(Xa,Vm),e(Z,Gm),e(Z,Va),e(Va,Rm),e(Z,Hm),$(o,Dl,h),$(o,We,h),e(We,Ga),e(Ga,Ym),e(We,Jm),e(We,Ra),e(Ra,Km),e(We,Zm),$(o,ql,h),$(o,st,h),e(st,Yn),e(Yn,Qm),e(st,ep),e(st,Jn),e(Jn,tp),$(o,zl,h),$(o,O,h),e(O,op),e(O,Ha),e(Ha,rp),e(O,ap),e(O,Kn),e(Kn,np),e(O,sp),e(O,Ya),e(Ya,ip),e(O,dp),e(O,Ja),e(Ja,lp),e(O,cp),e(O,Ka),e(Ka,mp),e(O,pp),$(o,Al,h),$(o,Xe,h),e(Xe,it),e(it,Zn),_(qo,Zn,null),e(Xe,hp),e(Xe,Qn),e(Qn,fp),$(o,Cl,h),$(o,F,h),_(zo,F,null),e(F,up),e(F,es),e(es,gp),e(F,_p),e(F,Za),e(Za,Qa),e(Qa,bp),e(Za,vp),e(F,yp),e(F,Ao),e(Ao,ts),e(ts,$p),e(Ao,wp),e(Ao,os),e(os,Tp),e(F,xp),e(F,rs),e(rs,kp),e(F,Pp),e(F,G),e(G,as),e(as,pe),e(pe,ns),e(ns,Mp),e(pe,Ep),e(pe,en),e(en,Fp),e(pe,jp),e(pe,tn),e(tn,Dp),e(pe,qp),e(G,zp),e(G,Co),e(Co,he),e(he,ss),e(ss,Ap),e(he,Cp),e(he,is),e(is,Ip),e(he,Lp),e(he,ds),e(ds,Up),e(he,Op),e(Co,Np),e(Co,Ve),e(Ve,dt),e(dt,ls),e(ls,Bp),e(dt,Sp),e(dt,on),e(on,Wp),e(dt,Xp),e(Ve,Vp),e(Ve,lt),e(lt,cs),e(cs,Gp),e(lt,Rp),e(lt,ms),e(ms,Hp),e(lt,Yp),e(Ve,Jp),e(Ve,ct),e(ct,ps),e(ps,Kp),e(ct,Zp),e(ct,hs),e(hs,Qp),e(ct,eh),e(G,th),e(G,fs),e(fs,mt),e(mt,us),e(us,oh),e(mt,rh),e(mt,gs),e(gs,ah),e(mt,nh),e(G,sh),e(G,_s),e(_s,pt),e(pt,bs),e(bs,ih),e(pt,dh),e(pt,vs),e(vs,lh),e(pt,ch),e(G,mh),e(G,ys),e(ys,S),e(S,$s),e($s,ph),e(S,hh),e(S,ws),e(ws,fh),e(S,uh),e(S,Ts),e(Ts,gh),e(S,_h),e(S,xs),e(xs,bh),e(S,vh),e(S,ks),e(ks,yh),e(S,$h),e(F,wh),e(F,fe),_(Io,fe,null),e(fe,Th),e(fe,Lo),e(Lo,xh),e(Lo,Ps),e(Ps,kh),e(Lo,Ph),e(fe,Mh),_(ht,fe,null),e(F,Eh),e(F,z),_(Uo,z,null),e(z,Fh),e(z,Ms),e(Ms,jh),e(z,Dh),e(z,Ge),e(Ge,qh),e(Ge,Es),e(Es,zh),e(Ge,Ah),e(Ge,Fs),e(Fs,Ch),e(Ge,Ih),e(z,Lh),e(z,Oo),e(Oo,Uh),e(Oo,js),e(js,Oh),e(Oo,Nh),e(z,Bh),e(z,No),e(No,Sh),e(No,Ds),e(Ds,Wh),e(No,Xh),e(z,Vh),_(ft,z,null),e(z,Gh),_(ut,z,null),e(z,Rh),_(gt,z,null),e(z,Hh),e(z,qs),e(qs,rn),e(rn,zs),e(zs,Yh),e(rn,Jh),e(z,Kh),e(z,As),e(As,Zh),e(z,Qh),e(z,Cs),
e(Cs,ef),e(z,tf),e(z,R),e(R,Is),e(Is,of),e(R,rf),e(R,Ls),e(Ls,af),e(R,nf),e(R,Us),e(Us,sf),e(R,df),e(R,Os),e(Os,lf),e(R,cf),e(R,Ns),e(Ns,mf),e(z,pf),e(z,Bs),e(Bs,hf),e(F,ff),e(F,_t),_(Bo,_t,null),e(_t,uf),e(_t,Ss),e(Ss,gf),e(F,_f),e(F,bt),_(So,bt,null),e(bt,bf),e(bt,an),e(an,vf),e(an,Wo),e(Wo,yf),e(F,$f),e(F,vt),_(Xo,vt,null),e(vt,wf),e(vt,Ws),e(Ws,Tf),e(F,xf),e(F,ue),_(Vo,ue,null),e(ue,kf),e(ue,Xs),e(Xs,Pf),e(ue,Mf),e(ue,Vs),e(Vs,Ef),e(F,Ff),e(F,ge),_(Go,ge,null),e(ge,jf),e(ge,Gs),e(Gs,Df),e(ge,qf),e(ge,Rs),e(Rs,zf),e(F,Af),e(F,yt),_(Ro,yt,null),e(yt,Cf),e(yt,Hs),e(Hs,If),e(F,Lf),e(F,$t),_(Ho,$t,null),e($t,Uf),e($t,Ys),e(Ys,Of),e(F,Nf),e(F,wt),_(Yo,wt,null),e(wt,Bf),e(wt,Js),e(Js,Sf),e(F,Wf),e(F,_e),_(Jo,_e,null),e(_e,Xf),e(_e,Ks),e(Ks,Vf),e(_e,Gf),_(Tt,_e,null),e(F,Rf),e(F,be),_(Ko,be,null),e(be,Hf),e(be,Zo),e(Zo,Yf),e(Zo,Zs),e(Zs,Jf),e(Zo,Kf),e(be,Zf),e(be,Qo),e(Qo,Qf),e(Qo,Qs),e(Qs,eu),e(Qo,tu),e(F,ou),e(F,xt),_(er,xt,null),e(xt,ru),e(xt,tr),e(tr,au),e(tr,nn),e(nn,nu),e(tr,su),e(F,iu),e(F,kt),_(or,kt,null),e(kt,du),e(kt,ei),e(ei,lu),e(F,cu),e(F,ve),_(rr,ve,null),e(ve,mu),e(ve,ti),e(ti,pu),e(ve,hu),e(ve,ar),e(ar,fu),e(ar,oi),e(oi,uu),e(ar,gu),$(o,Il,h),$(o,sn,h),$(o,Ll,h),$(o,Re,h),e(Re,Pt),e(Pt,ri),_(nr,ri,null),e(Re,_u),e(Re,ai),e(ai,bu),$(o,Ul,h),$(o,ye,h),e(ye,vu),e(ye,dn),e(dn,yu),e(ye,$u),e(ye,sr),e(sr,wu),e(ye,Tu),$(o,Ol,h),$(o,Mt,h),e(Mt,xu),e(Mt,ni),e(ni,ku),e(Mt,Pu),$(o,Nl,h),_(ir,o,h),$(o,Bl,h),$(o,Et,h),e(Et,Mu),e(Et,si),e(si,Eu),e(Et,Fu),$(o,Sl,h),$(o,Q,h),e(Q,ju),e(Q,ii),e(ii,Du),e(Q,qu),e(Q,di),e(di,zu),e(Q,Au),e(Q,li),e(li,Cu),e(Q,Iu),$(o,Wl,h),_(dr,o,h),$(o,Xl,h),$(o,Ft,h),e(Ft,Lu),e(Ft,ci),e(ci,Uu),e(Ft,Ou),$(o,Vl,h),_(lr,o,h),$(o,Gl,h),_(cr,o,h),$(o,Rl,h),$(o,ln,h),e(ln,Nu),$(o,Hl,h),_(mr,o,h),$(o,Yl,h),$(o,jt,h),e(jt,Bu),e(jt,mi),e(mi,Su),e(jt,Wu),$(o,Jl,h),$(o,He,h),e(He,Dt),e(Dt,pi),_(pr,pi,null),e(He,Xu),e(He,hi),e(hi,Vu),$(o,Kl,h),$(o,ee,h),e(ee,Gu),e(ee,fi),e(fi,Ru),e(ee,Hu),e(ee,ui),e(ui,Yu),e(ee,Ju),e(ee,gi),e(gi,Ku),e(ee,Zu),$(o,Zl,h),_(hr,o,h),$(o,Ql,h),$(o,$e,h),e($e,Qu),e($e,_i),e(_i,eg),e($e,tg),e($e,bi),e(bi,og),e($e,rg),$(o,ec,h),_(fr,o,h),$(o,tc,h),$(o,qt,h),e(qt,ag),e(qt,vi),e(vi,ng),e(qt,sg),$(o,oc,h),_(ur,o,h),$(o,rc,h),$(o,cn,h),e(cn,ig),$(o,ac,h),$(o,Ye,h),e(Ye,zt),e(zt,yi),_(gr,yi,null),e(Ye,dg),e(Ye,$i),e($i,lg),$(o,nc,h),$(o,I,h),_(_r,I,null),e(I,cg),e(I,br),e(br,mg),e(br,wi),e(wi,pg),e(br,hg),e(I,fg),e(I,we),_(vr,we,null),e(we,ug),e(we,Ti),e(Ti,gg),e(we,_g),e(we,Je),e(Je,bg),e(Je,xi),e(xi,vg),e(Je,yg),e(Je,ki),e(ki,$g),e(Je,wg),e(I,Tg),e(I,At),_(yr,At,null),e(At,xg),e(At,Pi),e(Pi,kg),e(I,Pg),e(I,Ct),_($r,Ct,null),e(Ct,Mg),e(Ct,Ke),e(Ke,Eg),e(Ke,Mi),e(Mi,Fg),e(Ke,jg),e(Ke,wr),e(wr,Dg),e(Ke,qg),e(I,zg),e(I,It),_(Tr,It,null),e(It,Ag),e(It,Ei),e(Ei,Cg),e(I,Ig),e(I,Lt),_(xr,Lt,null),e(Lt,Lg),e(Lt,Fi),e(Fi,Ug),e(I,Og),e(I,Ut),_(kr,Ut,null),e(Ut,Ng),e(Ut,ji),e(ji,Bg),e(I,Sg),e(I,Ot),_(Pr,Ot,null),e(Ot,Wg),e(Ot,Di),e(Di,Xg),e(I,Vg),e(I,Nt),_(Mr,Nt,null),e(Nt,Gg),e(Nt,Ze),e(Ze,Rg),e(Ze,qi),e(qi,Hg),e(Ze,Yg),e(Ze,mn),e(mn,Jg),e(Ze,Kg),$(o,sc,h),$(o,Qe,h),e(Qe,Bt),e(Bt,zi),_(Er,zi,null),e(Qe,Zg),e(Qe,Ai),e(Ai,Qg),$(o,ic,h),$(o,P,h),_(Fr,P,null),e(P,e_),e(P,Ci),e(Ci,t_),e(P,o_),e(P,pn),e(pn,hn),e(hn,r_),e(pn,a_),e(P,n_),e(P,jr),e(jr,Ii),e(Ii,s_),e(jr,i_),e(jr,Li),e(Li,d_),e(P,l_),e(P,Ui),e(Ui,c_),e(P,m_),e(P,et),e(et,Te),e(Te,Oi),e(Oi,p_),e(Te,h_),e(Te,fn),e(fn,f_),e(Te,u_),e(Te,un),e(un,g_),e(Te,__),e(et,b_),e(et,St),e(St,Ni),e(Ni,v_),e(St,y_),e(St,Bi),e(Bi,$_),e(St,w_),e(et,T_),e(et,W),e(W,Si),e(Si,x_),e(W,k_),e(W,Wi),e(Wi,P_),e(W,M_),e(W,Xi),e(Xi,E_),e(
W,F_),e(W,Vi),e(Vi,j_),e(W,D_),e(W,Gi),e(Gi,q_),e(W,z_),e(P,A_),e(P,xe),_(Dr,xe,null),e(xe,C_),e(xe,qr),e(qr,I_),e(qr,Ri),e(Ri,L_),e(qr,U_),e(xe,O_),_(Wt,xe,null),e(P,N_),e(P,Xt),_(zr,Xt,null),e(Xt,B_),e(Xt,Hi),e(Hi,S_),e(P,W_),e(P,Vt),_(Ar,Vt,null),e(Vt,X_),e(Vt,Cr),e(Cr,V_),e(Cr,Yi),e(Yi,G_),e(Cr,R_),e(P,H_),e(P,N),_(Ir,N,null),e(N,Y_),e(N,Ji),e(Ji,J_),e(N,K_),e(N,Lr),e(Lr,Z_),e(Lr,Ki),e(Ki,Q_),e(Lr,eb),e(N,tb),e(N,Ur),e(Ur,ob),e(Ur,Zi),e(Zi,rb),e(Ur,ab),e(N,nb),_(Gt,N,null),e(N,sb),_(Rt,N,null),e(P,ib),e(P,Ht),_(Or,Ht,null),e(Ht,db),e(Ht,Qi),e(Qi,lb),e(P,cb),e(P,Yt),_(Nr,Yt,null),e(Yt,mb),e(Yt,ed),e(ed,pb),e(P,hb),e(P,Jt),_(Br,Jt,null),e(Jt,fb),e(Jt,td),e(td,ub),e(P,gb),e(P,Kt),_(Sr,Kt,null),e(Kt,_b),e(Kt,od),e(od,bb),e(P,vb),e(P,Zt),_(Wr,Zt,null),e(Zt,yb),e(Zt,rd),e(rd,$b),e(P,wb),e(P,Qt),_(Xr,Qt,null),e(Qt,Tb),e(Qt,ad),e(ad,xb),e(P,kb),e(P,eo),_(Vr,eo,null),e(eo,Pb),e(eo,nd),e(nd,Mb),e(P,Eb),e(P,to),_(Gr,to,null),e(to,Fb),e(to,H),e(H,jb),e(H,Rr),e(Rr,Db),e(H,qb),e(H,sd),e(sd,zb),e(H,Ab),e(H,id),e(id,Cb),e(H,Ib),e(H,dd),e(dd,Lb),e(H,Ub),e(P,Ob),e(P,oo),_(Hr,oo,null),e(oo,Nb),e(oo,ld),e(ld,Bb),e(P,Sb),e(P,ke),_(Yr,ke,null),e(ke,Wb),e(ke,cd),e(cd,Xb),e(ke,Vb),_(ro,ke,null),e(P,Gb),e(P,Pe),_(Jr,Pe,null),e(Pe,Rb),e(Pe,Kr),e(Kr,Hb),e(Kr,md),e(md,Yb),e(Kr,Jb),e(Pe,Kb),e(Pe,Zr),e(Zr,Zb),e(Zr,pd),e(pd,Qb),e(Zr,ev),e(P,tv),e(P,ao),_(Qr,ao,null),e(ao,ov),e(ao,ea),e(ea,rv),e(ea,gn),e(gn,av),e(ea,nv),e(P,sv),e(P,no),_(ta,no,null),e(no,iv),e(no,hd),e(hd,dv),e(P,lv),e(P,so),_(oa,so,null),e(so,cv),e(so,fd),e(fd,mv),e(P,pv),e(P,io),_(ra,io,null),e(io,hv),e(io,ud),e(ud,fv),e(P,uv),e(P,lo),_(aa,lo,null),e(lo,gv),e(lo,gd),e(gd,_v),e(P,bv),e(P,co),_(na,co,null),e(co,vv),e(co,_d),e(_d,yv),e(P,$v),e(P,mo),_(sa,mo,null),e(mo,wv),e(mo,ia),e(ia,Tv),e(ia,bd),e(bd,xv),e(ia,kv),e(P,Pv),e(P,po),_(da,po,null),e(po,Mv),e(po,la),e(la,Ev),e(la,vd),e(vd,Fv),e(la,jv),$(o,dc,h),$(o,tt,h),e(tt,ho),e(ho,yd),_(ca,yd,null),e(tt,Dv),e(tt,$d),e($d,qv),$(o,lc,h),$(o,ne,h),_(ma,ne,null),e(ne,zv),e(ne,pa),e(pa,Av),e(pa,wd),e(wd,Cv),e(pa,Iv),e(ne,Lv),e(ne,fo),_(ha,fo,null),e(fo,Uv),e(fo,Td),e(Td,Ov),$(o,cc,h),$(o,ot,h),e(ot,uo),e(uo,xd),_(fa,xd,null),e(ot,Nv),e(ot,kd),e(kd,Bv),$(o,mc,h),$(o,q,h),_(ua,q,null),e(q,Sv),e(q,Pd),e(Pd,Wv),e(q,Xv),e(q,_n),e(_n,bn),e(bn,Vv),e(_n,Gv),e(q,Rv),e(q,Md),e(Md,Hv),e(q,Yv),e(q,rt),e(rt,Me),e(Me,Ed),e(Ed,Jv),e(Me,Kv),e(Me,vn),e(vn,Zv),e(Me,Qv),e(Me,yn),e(yn,ey),e(Me,ty),e(rt,oy),e(rt,go),e(go,Fd),e(Fd,ry),e(go,ay),e(go,jd),e(jd,ny),e(go,sy),e(rt,iy),e(rt,X),e(X,Dd),e(Dd,dy),e(X,ly),e(X,qd),e(qd,cy),e(X,my),e(X,zd),e(zd,py),e(X,hy),e(X,Ad),e(Ad,fy),e(X,uy),e(X,Cd),e(Cd,gy),e(X,_y),e(q,by),e(q,Ee),_(ga,Ee,null),e(Ee,vy),e(Ee,_a),e(_a,yy),e(_a,Id),e(Id,$y),e(_a,wy),e(Ee,Ty),_(_o,Ee,null),e(q,xy),e(q,V),_(ba,V,null),e(V,ky),e(V,Ld),e(Ld,Py),e(V,My),e(V,va),e(va,Ey),e(va,Ud),e(Ud,Fy),e(va,jy),e(V,Dy),e(V,ya),e(ya,qy),e(ya,Od),e(Od,zy),e(ya,Ay),e(V,Cy),_(bo,V,null),e(q,Iy),e(q,Fe),_($a,Fe,null),e(Fe,Ly),e(Fe,wa),e(wa,Uy),e(wa,Nd),e(Nd,Oy),e(wa,Ny),e(Fe,By),e(Fe,Bd),e(Bd,Sy),e(q,Wy),e(q,je),_(Ta,je,null),e(je,Xy),e(je,Sd),e(Sd,Vy),e(je,Gy),_(vo,je,null),e(q,Ry),e(q,yo),_(xa,yo,null),e(yo,Hy),e(yo,ka),e(ka,Yy),e(ka,Wd),e(Wd,Jy),e(ka,Ky),e(q,Zy),e(q,te),_(Pa,te,null),e(te,Qy),e(te,Y),e(Y,e1),e(Y,Xd),e(Xd,t1),e(Y,o1),e(Y,Vd),e(Vd,r1),e(Y,a1),e(Y,Gd),e(Gd,n1),e(Y,s1),e(Y,Rd),e(Rd,i1),e(Y,d1),e(te,l1),e(te,Hd),e(Hd,c1),e(te,m1),_($o,te,null),e(q,p1),e(q,oe),_(Ma,oe,null),e(oe,h1),e(oe,J),e(J,f1),e(J,Yd),e(Yd,u1),e(J,g1),e(J,Jd),e(Jd,_1),e(J,b1),e(J,Kd),e(Kd,v1),e(J,y1),e(J,Zd),e(Zd,$1),e(J,w1),e(oe,T1),e(oe,Qd
),e(Qd,x1),e(oe,k1),_(wo,oe,null),e(q,P1),e(q,De),_(Ea,De,null),e(De,M1),e(De,K),e(K,E1),e(K,el),e(el,F1),e(K,j1),e(K,tl),e(tl,D1),e(K,q1),e(K,ol),e(ol,z1),e(K,A1),e(K,rl),e(rl,C1),e(K,I1),e(De,L1),_(To,De,null),$(o,pc,h),$(o,at,h),e(at,xo),e(xo,al),_(Fa,al,null),e(at,U1),e(at,nl),e(nl,O1),$(o,hc,h),$(o,se,h),_(ja,se,null),e(se,N1),e(se,sl),e(sl,B1),e(se,S1),e(se,qe),_(Da,qe,null),e(qe,W1),e(qe,qa),e(qa,X1),e(qa,il),e(il,V1),e(qa,G1),e(qe,R1),_(ko,qe,null),$(o,fc,h),$(o,nt,h),e(nt,Po),e(Po,dl),_(za,dl,null),e(nt,H1),e(nt,ll),e(ll,Y1),$(o,uc,h),$(o,ie,h),_(Aa,ie,null),e(ie,J1),e(ie,Ca),e(Ca,K1),e(Ca,Ia),e(Ia,cl),e(cl,Z1),e(Ca,Q1),e(ie,e2),e(ie,ml),e(ml,t2),gc=!0},p(o,[h]){const La={};h&2&&(La.$$scope={dirty:h,ctx:o}),ht.$set(La);const pl={};h&2&&(pl.$$scope={dirty:h,ctx:o}),ft.$set(pl);const hl={};h&2&&(hl.$$scope={dirty:h,ctx:o}),ut.$set(hl);const fl={};h&2&&(fl.$$scope={dirty:h,ctx:o}),gt.$set(fl);const de={};h&2&&(de.$$scope={dirty:h,ctx:o}),Tt.$set(de);const ul={};h&2&&(ul.$$scope={dirty:h,ctx:o}),Wt.$set(ul);const gl={};h&2&&(gl.$$scope={dirty:h,ctx:o}),Gt.$set(gl);const _l={};h&2&&(_l.$$scope={dirty:h,ctx:o}),Rt.$set(_l);const Mo={};h&2&&(Mo.$$scope={dirty:h,ctx:o}),ro.$set(Mo);const bl={};h&2&&(bl.$$scope={dirty:h,ctx:o}),_o.$set(bl);const vl={};h&2&&(vl.$$scope={dirty:h,ctx:o}),bo.$set(vl);const Ua={};h&2&&(Ua.$$scope={dirty:h,ctx:o}),vo.$set(Ua);const yl={};h&2&&(yl.$$scope={dirty:h,ctx:o}),$o.$set(yl);const $l={};h&2&&($l.$$scope={dirty:h,ctx:o}),wo.$set($l);const B={};h&2&&(B.$$scope={dirty:h,ctx:o}),To.$set(B);const wl={};h&2&&(wl.$$scope={dirty:h,ctx:o}),ko.$set(wl)},i(o){gc||(b(c.$$.fragment,o),b(qo.$$.fragment,o),b(zo.$$.fragment,o),b(Io.$$.fragment,o),b(ht.$$.fragment,o),b(Uo.$$.fragment,o),b(ft.$$.fragment,o),b(ut.$$.fragment,o),b(gt.$$.fragment,o),b(Bo.$$.fragment,o),b(So.$$.fragment,o),b(Xo.$$.fragment,o),b(Vo.$$.fragment,o),b(Go.$$.fragment,o),b(Ro.$$.fragment,o),b(Ho.$$.fragment,o),b(Yo.$$.fragment,o),b(Jo.$$.fragment,o),b(Tt.$$.fragment,o),b(Ko.$$.fragment,o),b(er.$$.fragment,o),b(or.$$.fragment,o),b(rr.$$.fragment,o),b(nr.$$.fragment,o),b(ir.$$.fragment,o),b(dr.$$.fragment,o),b(lr.$$.fragment,o),b(cr.$$.fragment,o),b(mr.$$.fragment,o),b(pr.$$.fragment,o),b(hr.$$.fragment,o),b(fr.$$.fragment,o),b(ur.$$.fragment,o),b(gr.$$.fragment,o),b(_r.$$.fragment,o),b(vr.$$.fragment,o),b(yr.$$.fragment,o),b($r.$$.fragment,o),b(Tr.$$.fragment,o),b(xr.$$.fragment,o),b(kr.$$.fragment,o),b(Pr.$$.fragment,o),b(Mr.$$.fragment,o),b(Er.$$.fragment,o),b(Fr.$$.fragment,o),b(Dr.$$.fragment,o),b(Wt.$$.fragment,o),b(zr.$$.fragment,o),b(Ar.$$.fragment,o),b(Ir.$$.fragment,o),b(Gt.$$.fragment,o),b(Rt.$$.fragment,o),b(Or.$$.fragment,o),b(Nr.$$.fragment,o),b(Br.$$.fragment,o),b(Sr.$$.fragment,o),b(Wr.$$.fragment,o),b(Xr.$$.fragment,o),b(Vr.$$.fragment,o),b(Gr.$$.fragment,o),b(Hr.$$.fragment,o),b(Yr.$$.fragment,o),b(ro.$$.fragment,o),b(Jr.$$.fragment,o),b(Qr.$$.fragment,o),b(ta.$$.fragment,o),b(oa.$$.fragment,o),b(ra.$$.fragment,o),b(aa.$$.fragment,o),b(na.$$.fragment,o),b(sa.$$.fragment,o),b(da.$$.fragment,o),b(ca.$$.fragment,o),b(ma.$$.fragment,o),b(ha.$$.fragment,o),b(fa.$$.fragment,o),b(ua.$$.fragment,o),b(ga.$$.fragment,o),b(_o.$$.fragment,o),b(ba.$$.fragment,o),b(bo.$$.fragment,o),b($a.$$.fragment,o),b(Ta.$$.fragment,o),b(vo.$$.fragment,o),b(xa.$$.fragment,o),b(Pa.$$.fragment,o),b($o.$$.fragment,o),b(Ma.$$.fragment,o),b(wo.$$.fragment,o),b(Ea.$$.fragment,o),b(To.$$.fragment,o),b(Fa.$$.fragment,o),b(ja.$$.fragment,o),b(Da.$$.fragment,o),b(ko.$$.fragment,o),b(za.$$.fragment,o),b(Aa.$$.fragment,o),
gc=!0)},o(o){v(c.$$.fragment,o),v(qo.$$.fragment,o),v(zo.$$.fragment,o),v(Io.$$.fragment,o),v(ht.$$.fragment,o),v(Uo.$$.fragment,o),v(ft.$$.fragment,o),v(ut.$$.fragment,o),v(gt.$$.fragment,o),v(Bo.$$.fragment,o),v(So.$$.fragment,o),v(Xo.$$.fragment,o),v(Vo.$$.fragment,o),v(Go.$$.fragment,o),v(Ro.$$.fragment,o),v(Ho.$$.fragment,o),v(Yo.$$.fragment,o),v(Jo.$$.fragment,o),v(Tt.$$.fragment,o),v(Ko.$$.fragment,o),v(er.$$.fragment,o),v(or.$$.fragment,o),v(rr.$$.fragment,o),v(nr.$$.fragment,o),v(ir.$$.fragment,o),v(dr.$$.fragment,o),v(lr.$$.fragment,o),v(cr.$$.fragment,o),v(mr.$$.fragment,o),v(pr.$$.fragment,o),v(hr.$$.fragment,o),v(fr.$$.fragment,o),v(ur.$$.fragment,o),v(gr.$$.fragment,o),v(_r.$$.fragment,o),v(vr.$$.fragment,o),v(yr.$$.fragment,o),v($r.$$.fragment,o),v(Tr.$$.fragment,o),v(xr.$$.fragment,o),v(kr.$$.fragment,o),v(Pr.$$.fragment,o),v(Mr.$$.fragment,o),v(Er.$$.fragment,o),v(Fr.$$.fragment,o),v(Dr.$$.fragment,o),v(Wt.$$.fragment,o),v(zr.$$.fragment,o),v(Ar.$$.fragment,o),v(Ir.$$.fragment,o),v(Gt.$$.fragment,o),v(Rt.$$.fragment,o),v(Or.$$.fragment,o),v(Nr.$$.fragment,o),v(Br.$$.fragment,o),v(Sr.$$.fragment,o),v(Wr.$$.fragment,o),v(Xr.$$.fragment,o),v(Vr.$$.fragment,o),v(Gr.$$.fragment,o),v(Hr.$$.fragment,o),v(Yr.$$.fragment,o),v(ro.$$.fragment,o),v(Jr.$$.fragment,o),v(Qr.$$.fragment,o),v(ta.$$.fragment,o),v(oa.$$.fragment,o),v(ra.$$.fragment,o),v(aa.$$.fragment,o),v(na.$$.fragment,o),v(sa.$$.fragment,o),v(da.$$.fragment,o),v(ca.$$.fragment,o),v(ma.$$.fragment,o),v(ha.$$.fragment,o),v(fa.$$.fragment,o),v(ua.$$.fragment,o),v(ga.$$.fragment,o),v(_o.$$.fragment,o),v(ba.$$.fragment,o),v(bo.$$.fragment,o),v($a.$$.fragment,o),v(Ta.$$.fragment,o),v(vo.$$.fragment,o),v(xa.$$.fragment,o),v(Pa.$$.fragment,o),v($o.$$.fragment,o),v(Ma.$$.fragment,o),v(wo.$$.fragment,o),v(Ea.$$.fragment,o),v(To.$$.fragment,o),v(Fa.$$.fragment,o),v(ja.$$.fragment,o),v(Da.$$.fragment,o),v(ko.$$.fragment,o),v(za.$$.fragment,o),v(Aa.$$.fragment,o),gc=!1},d(o){t(p),o&&t(x),o&&t(w),y(c),o&&t(jl),o&&t(Z),o&&t(Dl),o&&t(We),o&&t(ql),o&&t(st),o&&t(zl),o&&t(O),o&&t(Al),o&&t(Xe),y(qo),o&&t(Cl),o&&t(F),y(zo),y(Io),y(ht),y(Uo),y(ft),y(ut),y(gt),y(Bo),y(So),y(Xo),y(Vo),y(Go),y(Ro),y(Ho),y(Yo),y(Jo),y(Tt),y(Ko),y(er),y(or),y(rr),o&&t(Il),o&&t(sn),o&&t(Ll),o&&t(Re),y(nr),o&&t(Ul),o&&t(ye),o&&t(Ol),o&&t(Mt),o&&t(Nl),y(ir,o),o&&t(Bl),o&&t(Et),o&&t(Sl),o&&t(Q),o&&t(Wl),y(dr,o),o&&t(Xl),o&&t(Ft),o&&t(Vl),y(lr,o),o&&t(Gl),y(cr,o),o&&t(Rl),o&&t(ln),o&&t(Hl),y(mr,o),o&&t(Yl),o&&t(jt),o&&t(Jl),o&&t(He),y(pr),o&&t(Kl),o&&t(ee),o&&t(Zl),y(hr,o),o&&t(Ql),o&&t($e),o&&t(ec),y(fr,o),o&&t(tc),o&&t(qt),o&&t(oc),y(ur,o),o&&t(rc),o&&t(cn),o&&t(ac),o&&t(Ye),y(gr),o&&t(nc),o&&t(I),y(_r),y(vr),y(yr),y($r),y(Tr),y(xr),y(kr),y(Pr),y(Mr),o&&t(sc),o&&t(Qe),y(Er),o&&t(ic),o&&t(P),y(Fr),y(Dr),y(Wt),y(zr),y(Ar),y(Ir),y(Gt),y(Rt),y(Or),y(Nr),y(Br),y(Sr),y(Wr),y(Xr),y(Vr),y(Gr),y(Hr),y(Yr),y(ro),y(Jr),y(Qr),y(ta),y(oa),y(ra),y(aa),y(na),y(sa),y(da),o&&t(dc),o&&t(tt),y(ca),o&&t(lc),o&&t(ne),y(ma),y(ha),o&&t(cc),o&&t(ot),y(fa),o&&t(mc),o&&t(q),y(ua),y(ga),y(_o),y(ba),y(bo),y($a),y(Ta),y(vo),y(xa),y(Pa),y($o),y(Ma),y(wo),y(Ea),y(To),o&&t(pc),o&&t(at),y(Fa),o&&t(hc),o&&t(se),y(ja),y(Da),y(ko),o&&t(fc),o&&t(nt),y(za),o&&t(uc),o&&t(ie),y(Aa)}}}const Tx={local:"models",sections:[{local:"transformers.PreTrainedModel",sections:[{local:"large-model-loading",title:"Large model loading"},{local:"model-instantiation-dtype",title:"Model Instantiation 
dtype"}],title:"PreTrainedModel"},{local:"transformers.modeling_utils.ModuleUtilsMixin",title:"ModuleUtilsMixin"},{local:"transformers.TFPreTrainedModel",title:"TFPreTrainedModel"},{local:"transformers.modeling_tf_utils.TFModelUtilsMixin",title:"TFModelUtilsMixin"},{local:"transformers.FlaxPreTrainedModel",title:"FlaxPreTrainedModel"},{local:"transformers.utils.PushToHubMixin",title:"Pushing to the Hub"},{local:"transformers.modeling_utils.load_sharded_checkpoint",title:"Sharded checkpoints"}],title:"Models"};function xx(D){return nx(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Dx extends tx{constructor(p){super();ox(this,p,xx,wx,rx,{})}}export{Dx as default,Tx as metadata};
13
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/configuration.mdx-hf-doc-builder.js
import{S as xs,i as Es,s as Ts,e as n,k as d,w as y,t as o,M as Ds,c as a,d as t,m as c,a as s,x as w,h as r,b as u,G as e,g as E,y as $,q as P,o as C,B as k,v as zs,L as Cs}from"../../chunks/vendor-hf-doc-builder.js";import{T as $a}from"../../chunks/Tip-hf-doc-builder.js";import{D as T}from"../../chunks/Docstring-hf-doc-builder.js";import{C as ks}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as $s}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Ps}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function js(j){let f,b,p,g,x;return{c(){f=n("p"),b=o(`A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does `),p=n("strong"),g=o("not"),x=o(" load the model weights. It only affects the model\u2019s configuration.")},l(l){f=a(l,"P",{});var _=s(f);b=r(_,`A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does `),p=a(_,"STRONG",{});var O=s(p);g=r(O,"not"),O.forEach(t),x=r(_," load the model weights. It only affects the model\u2019s configuration."),_.forEach(t)},m(l,_){E(l,f,_),e(f,b),e(f,p),e(p,g),e(f,x)},d(l){l&&t(f)}}}function qs(j){let f,b,p,g,x;return g=new ks({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("bert-base-cased") # Push the config to your namespace with the name "my-finetuned-bert". config.push_to_hub("my-finetuned-bert") # Push the config to an organization with the name "my-finetuned-bert". config.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot;.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the config to an organization with the name &quot;my-finetuned-bert&quot;.</span> config.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){f=n("p"),b=o("Examples:"),p=d(),y(g.$$.fragment)},l(l){f=a(l,"P",{});var _=s(f);b=r(_,"Examples:"),_.forEach(t),p=c(l),w(g.$$.fragment,l)},m(l,_){E(l,f,_),e(f,b),E(l,p,_),$(g,l,_),x=!0},p:Cs,i(l){x||(P(g.$$.fragment,l),x=!0)},o(l){C(g.$$.fragment,l),x=!1},d(l){l&&t(f),l&&t(p),k(g,l)}}}function As(j){let f,b,p,g,x;return{c(){f=n("p"),b=o("Passing "),p=n("code"),g=o("use_auth_token=True"),x=o(" is required when you want to use a private model.")},l(l){f=a(l,"P",{});var _=s(f);b=r(_,"Passing "),p=a(_,"CODE",{});var O=s(p);g=r(O,"use_auth_token=True"),O.forEach(t),x=r(_," is required when you want to use a private model."),_.forEach(t)},m(l,_){E(l,f,_),e(f,b),e(f,p),e(p,g),e(f,x)},d(l){l&&t(f)}}}function Os(j){let f,b,p,g,x;return g=new ks({props:{code:`# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained( "bert-base-uncased" ) # Download configuration from huggingface.co and cache. config = BertConfig.from_pretrained( "./test/saved_model/" ) # E.g. 
config (or model) was saved using *save_pretrained('./test/saved_model/')* config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json") config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = BertConfig.from_pretrained( "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True ) assert config.output_attentions == True assert unused_kwargs == {"foo": False}`,highlighted:`<span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PretrainedConfig* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: BertConfig</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span> ) <span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;./test/saved_model/&quot;</span> ) <span class="hljs-comment"># E.g. config (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> config = BertConfig.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_configuration.json&quot;</span>) config = BertConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> config, unused_kwargs = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span> ) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-keyword">assert</span> unused_kwargs == {<span class="hljs-string">&quot;foo&quot;</span>: <span class="hljs-literal">False</span>}`}}),{c(){f=n("p"),b=o("Examples:"),p=d(),y(g.$$.fragment)},l(l){f=a(l,"P",{});var _=s(f);b=r(_,"Examples:"),_.forEach(t),p=c(l),w(g.$$.fragment,l)},m(l,_){E(l,f,_),e(f,b),E(l,p,_),$(g,l,_),x=!0},p:Cs,i(l){x||(P(g.$$.fragment,l),x=!0)},o(l){C(g.$$.fragment,l),x=!1},d(l){l&&t(f),l&&t(p),k(g,l)}}}function Fs(j){let f,b;return{c(){f=n("p"),b=o("This API is experimental and may have some slight breaking changes in the next releases.")},l(p){f=a(p,"P",{});var g=s(f);b=r(g,"This API is experimental and may have some slight breaking changes in the next releases."),g.forEach(t)},m(p,g){E(p,f,g),e(f,b)},d(p){p&&t(f)}}}function Ss(j){let 
f,b,p,g,x,l,_,O,Do,ao,G,zo,He,jo,qo,so,D,Ao,dt,Oo,Fo,ct,So,Io,lt,Lo,No,ft,Wo,Bo,io,R,H,mt,$e,Mo,ht,Vo,co,m,Pe,Ro,pt,Uo,Jo,X,Go,gt,Ho,Xo,F,W,ut,Yo,Ko,_t,Qo,Zo,Xe,er,tr,or,z,bt,rr,nr,vt,ar,sr,Ye,ir,dr,Ke,cr,lr,Qe,fr,mr,hr,Y,yt,pr,gr,wt,ur,_r,br,K,$t,vr,yr,Pt,wr,$r,Pr,Ct,Cr,kr,S,Q,kt,xr,Er,xt,Tr,Dr,zr,Z,Et,jr,qr,Tt,Ar,Or,Fr,ee,Dt,Sr,Ir,zt,Lr,Nr,Wr,te,jt,Br,Mr,qt,Vr,Rr,Ur,B,Ce,Jr,ke,Gr,At,Hr,Xr,Yr,oe,Kr,re,xe,Qr,I,Zr,Ot,en,tn,Ft,on,rn,St,nn,an,sn,ne,Ee,dn,Te,cn,Ze,ln,fn,mn,ae,De,hn,ze,pn,et,gn,un,_n,q,je,bn,qe,vn,tt,yn,wn,$n,se,Pn,ie,Cn,de,Ae,kn,L,xn,It,En,Tn,ot,Dn,zn,Lt,jn,qn,An,M,Oe,On,Fe,Fn,Nt,Sn,In,Ln,ce,Nn,le,Se,Wn,U,Bn,Wt,Mn,Vn,rt,Rn,Un,Jn,fe,Ie,Gn,Bt,Hn,Xn,me,Le,Yn,Mt,Kn,Qn,he,Ne,Zn,Vt,ea,ta,pe,We,oa,Rt,ra,na,ge,Be,aa,Me,sa,Ut,ia,da,ca,A,Ve,la,Re,fa,Jt,ma,ha,pa,J,ga,Gt,ua,_a,Ht,ba,va,ya,Xt,wa,lo;return l=new $s({}),$e=new $s({}),Pe=new T({props:{name:"class transformers.PretrainedConfig",anchor:"transformers.PretrainedConfig",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.name_or_path",description:`<strong>name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Store the string that was passed to <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">TFPreTrainedModel.from_pretrained()</a> as <code>pretrained_model_name_or_path</code> if the configuration was created with such a method.`,name:"name_or_path"},{anchor:"transformers.PretrainedConfig.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return all hidden-states.`,name:"output_hidden_states"},{anchor:"transformers.PretrainedConfig.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should returns all attentions.`,name:"output_attentions"},{anchor:"transformers.PretrainedConfig.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"},{anchor:"transformers.PretrainedConfig.is_encoder_decoder",description:`<strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as an encoder/decoder or not.`,name:"is_encoder_decoder"},{anchor:"transformers.PretrainedConfig.is_decoder",description:`<strong>is_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as decoder or not (in which case it&#x2019;s used as an encoder).`,name:"is_decoder"},{anchor:"transformers.PretrainedConfig.cross_attention_hidden_size**",description:`<strong>cross_attention_hidden_size**</strong> (<code>bool</code>, <em>optional</em>) &#x2014; The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder setting and the cross-attention hidden dimension differs from 
<code>self.config.hidden_size</code>.`,name:"cross_attention_hidden_size**"},{anchor:"transformers.PretrainedConfig.add_cross_attention",description:`<strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the <a href="/docs/transformers/pr_19429/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> class, which consists of all models in <code>AUTO_MODELS_FOR_CAUSAL_LM</code>.`,name:"add_cross_attention"},{anchor:"transformers.PretrainedConfig.tie_encoder_decoder",description:`<strong>tie_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.`,name:"tie_encoder_decoder"},{anchor:"transformers.PretrainedConfig.prune_heads",description:`<strong>prune_heads</strong> (<code>Dict[int, List[int]]</code>, <em>optional</em>, defaults to <code>{}</code>) &#x2014; Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of heads to prune in said layer.</p> <p>For instance <code>{1: [0, 2], 2: [2, 3]}</code> will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"prune_heads"},{anchor:"transformers.PretrainedConfig.chunk_size_feed_forward",description:`<strong>chunk_size_feed_forward</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; The chunk size of all feed forward layers in the residual attention blocks. A chunk size of <code>0</code> means that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes <code>n</code> &lt; sequence_length embeddings at a time. For more information on feed forward chunking, see <a href="../glossary.html#feed-forward-chunking">How does Feed Forward Chunking work?</a>.`,name:"chunk_size_feed_forward"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L48",parameterGroups:[{title:"Parameters for sequence generation",parametersDescription:[{anchor:"transformers.PretrainedConfig.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; Maximum length that will be used by default in the <code>generate</code> method of the model.`,name:"max_length"},{anchor:"transformers.PretrainedConfig.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Minimum length that will be used by default in the <code>generate</code> method of the model.`,name:"min_length"},{anchor:"transformers.PretrainedConfig.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag that will be used by default in the <code>generate</code> method of the model. Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.PretrainedConfig.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag that will be used by default in the <code>generate</code> method of the model. 
Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.PretrainedConfig.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search that will be used by default in the <code>generate</code> method of the model. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.PretrainedConfig.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams that will be used by default in the <code>generate</code> method of the model. 1 means no group beam search.`,name:"num_beam_groups"},{anchor:"transformers.PretrainedConfig.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Value to control diversity for group beam search. that will be used by default in the <code>generate</code> method of the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.`,name:"diversity_penalty"},{anchor:"transformers.PretrainedConfig.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The value used to module the next token probabilities that will be used by default in the <code>generate</code> method of the model. Must be strictly positive.`,name:"temperature"},{anchor:"transformers.PretrainedConfig.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the <code>generate</code> method of the model.`,name:"top_k"},{anchor:"transformers.PretrainedConfig.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Value that will be used by default in the <code>generate</code> method of the model for <code>top_p</code>. If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.PretrainedConfig.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Parameter for repetition penalty that will be used by default in the <code>generate</code> method of the model. 1.0 means no penalty.`,name:"repetition_penalty"},{anchor:"transformers.PretrainedConfig.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. 
negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.`,name:"length_penalty"},{anchor:"transformers.PretrainedConfig.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Value that will be used by default in the &#x2014; <code>generate</code> method of the model for <code>no_repeat_ngram_size</code>. If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.PretrainedConfig.encoder_no_repeat_ngram_size",description:`<strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Value that will be used by &#x2014; default in the <code>generate</code> method of the model for <code>encoder_no_repeat_ngram_size</code>. If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.`,name:"encoder_no_repeat_ngram_size"},{anchor:"transformers.PretrainedConfig.bad_words_ids",description:`<strong>bad_words_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated that will be used by default in the <code>generate</code> method of the model. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids"},{anchor:"transformers.PretrainedConfig.num_return_sequences",description:`<strong>num_return_sequences</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of independently computed returned sequences for each element in the batch that will be used by default in the <code>generate</code> method of the model.`,name:"num_return_sequences"},{anchor:"transformers.PretrainedConfig.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should return the logits when used for generation.`,name:"output_scores"},{anchor:"transformers.PretrainedConfig.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a <code>torch.LongTensor</code>.`,name:"return_dict_in_generate"},{anchor:"transformers.PretrainedConfig.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.PretrainedConfig.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"forced_eos_token_id"},{anchor:"transformers.PretrainedConfig.remove_invalid_values",description:`<strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.`,name:"remove_invalid_values"}]},{title:"Parameters for fine-tuning tasks",parametersDescription:[{anchor:"transformers.PretrainedConfig.architectures",description:`<strong>architectures</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Model architectures that can be used with the model pretrained weights.`,name:"architectures"},{anchor:"transformers.PretrainedConfig.finetuning_task",description:`<strong>finetuning_task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.`,name:"finetuning_task"},{anchor:"transformers.PretrainedConfig.id2label",description:`<strong>id2label</strong> (<code>Dict[int, str]</code>, <em>optional</em>) &#x2014; A map from index (for instance prediction index, or target index) to label.`,name:"id2label"},{anchor:"transformers.PretrainedConfig.label2id",description:"<strong>label2id</strong> (<code>Dict[str, int]</code>, <em>optional</em>) &#x2014; A map from label to index for the model.",name:"label2id"},{anchor:"transformers.PretrainedConfig.num_labels",description:`<strong>num_labels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of labels to use in the last layer added to the model, typically for a classification task.`,name:"num_labels"},{anchor:"transformers.PretrainedConfig.task_specific_params",description:`<strong>task_specific_params</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; Additional keyword arguments to store for the current task.`,name:"task_specific_params"},{anchor:"transformers.PretrainedConfig.problem_type",description:`<strong>problem_type</strong> (<code>str</code>, <em>optional</em>) &#x2014; Problem type for <code>XxxForSequenceClassification</code> models. 
Can be one of <code>&quot;regression&quot;</code>, <code>&quot;single_label_classification&quot;</code> or <code>&quot;multi_label_classification&quot;</code>.`,name:"problem_type"}]},{title:"Parameters linked to the tokenizer",parametersDescription:[{anchor:"transformers.PretrainedConfig.tokenizer_class",description:`<strong>tokenizer_class</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the model by default).`,name:"tokenizer_class"},{anchor:"transformers.PretrainedConfig.prefix",description:`<strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; A specific prompt that should be added at the beginning of each text before calling the model.`,name:"prefix"},{anchor:"transformers.PretrainedConfig.bos_token_id",description:"<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-stream</em> token.",name:"bos_token_id"},{anchor:"transformers.PretrainedConfig.pad_token_id",description:"<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.",name:"pad_token_id"},{anchor:"transformers.PretrainedConfig.eos_token_id",description:"<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-stream</em> token.",name:"eos_token_id"},{anchor:"transformers.PretrainedConfig.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"decoder_start_token_id"},{anchor:"transformers.PretrainedConfig.sep_token_id",description:"<strong>sep_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>separation</em> token.",name:"sep_token_id"}]},{title:"PyTorch specific parameters",parametersDescription:[{anchor:"transformers.PretrainedConfig.torchscript",description:`<strong>torchscript</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be used with Torchscript.`,name:"torchscript"},{anchor:"transformers.PretrainedConfig.tie_word_embeddings",description:`<strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the model&#x2019;s input and output word embeddings should be tied. Note that this is only relevant if the model has a output word embedding layer.`,name:"tie_word_embeddings"},{anchor:"transformers.PretrainedConfig.torch_dtype",description:`<strong>torch_dtype</strong> (<code>str</code>, <em>optional</em>) &#x2014; The <code>dtype</code> of the weights. This attribute can be used to initialize the model to a non-default <code>dtype</code> (which is normally <code>float32</code>) and thus allow for optimal storage allocation. For example, if the saved model is <code>float16</code>, ideally we want to load it back using the minimal amount of memory needed to load <code>float16</code> weights. Since the config object is stored in plain text, this attribute contains just the floating type string without the <code>torch.</code> prefix. For example, for <code>torch.float16</code> \`<code>torch_dtype</code> is the <code>&quot;float16&quot;</code> string.</p> <p>This attribute is currently not being used during model loading time, but this may change in the future versions. 
But we can already start preparing for the future by saving the dtype with save_pretrained.`,name:"torch_dtype"}]},{title:"TensorFlow specific parameters",parametersDescription:[{anchor:"transformers.PretrainedConfig.use_bfloat16",description:`<strong>use_bfloat16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).`,name:"use_bfloat16"},{anchor:"transformers.PretrainedConfig.tf_legacy_loss",description:`<strong>tf_legacy_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers v5.`,name:"tf_legacy_loss"}]}]}}),X=new $a({props:{$$slots:{default:[js]},$$scope:{ctx:j}}}),Ce=new T({props:{name:"push_to_hub",anchor:"transformers.PretrainedConfig.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your config to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.PretrainedConfig.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.PretrainedConfig.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload config&quot;</code>.`,name:"commit_message"},{anchor:"transformers.PretrainedConfig.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.PretrainedConfig.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.PretrainedConfig.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. 
Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.PretrainedConfig.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),oe=new Ps({props:{anchor:"transformers.PretrainedConfig.push_to_hub.example",$$slots:{default:[qs]},$$scope:{ctx:j}}}),xe=new T({props:{name:"dict_torch_dtype_to_str",anchor:"transformers.PretrainedConfig.dict_torch_dtype_to_str",parameters:[{name:"d",val:": typing.Dict[str, typing.Any]"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L873"}}),Ee=new T({props:{name:"from_dict",anchor:"transformers.PretrainedConfig.from_dict",parameters:[{name:"config_dict",val:": typing.Dict[str, typing.Any]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.from_dict.config_dict",description:`<strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.get_config_dict">get_config_dict()</a> method.`,name:"config_dict"},{anchor:"transformers.PretrainedConfig.from_dict.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>) &#x2014; Additional parameters from which to initialize the configuration object.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L657",returnDescription:` <p>The configuration object instantiated from those parameters.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),De=new T({props:{name:"from_json_file",anchor:"transformers.PretrainedConfig.from_json_file",parameters:[{name:"json_file",val:": typing.Union[str, os.PathLike]"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.from_json_file.json_file",description:`<strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file containing the parameters.`,name:"json_file"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L711",returnDescription:` <p>The configuration object instantiated from that JSON file.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),je=new T({props:{name:"from_pretrained",anchor:"transformers.PretrainedConfig.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained model configuration hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a configuration file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved configuration JSON <em>file</em>, e.g., <code>./my_model_directory/configuration.json</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PretrainedConfig.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.PretrainedConfig.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the configuration files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.PretrainedConfig.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.PretrainedConfig.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.PretrainedConfig.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.PretrainedConfig.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.PretrainedConfig.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final configuration object.</p> <p>If <code>True</code>, then this functions returns a <code>Tuple(config, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>config</code> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.PretrainedConfig.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.`,name:"subfolder"},{anchor:"transformers.PretrainedConfig.from_pretrained.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are <em>not</em> configuration attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L454",returnDescription:` <p>The configuration object instantiated from this pretrained model.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> `}}),se=new $a({props:{$$slots:{default:[As]},$$scope:{ctx:j}}}),ie=new Ps({props:{anchor:"transformers.PretrainedConfig.from_pretrained.example",$$slots:{default:[Os]},$$scope:{ctx:j}}}),Ae=new T({props:{name:"get_config_dict",anchor:"transformers.PretrainedConfig.get_config_dict",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.`,name:"pretrained_model_name_or_path"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L540",returnDescription:` <p>The dictionary(ies) that will be used to instantiate the configuration object.</p> `,returnType:` <p><code>Tuple[Dict, Dict]</code></p> `}}),Oe=new T({props:{name:"register_for_auto_class",anchor:"transformers.PretrainedConfig.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoConfig'"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoConfig&quot;</code>) &#x2014; The auto class to register this new configuration 
with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L885"}}),ce=new $a({props:{warning:!0,$$slots:{default:[Fs]},$$scope:{ctx:j}}}),Se=new T({props:{name:"save_pretrained",anchor:"transformers.PretrainedConfig.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PretrainedConfig.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the configuration JSON file will be saved (will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.PretrainedConfig.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L412"}}),Ie=new T({props:{name:"to_dict",anchor:"transformers.PretrainedConfig.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L771",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance.</p> `,returnType:` <p><code>Dict[str, Any]</code></p> `}}),Le=new T({props:{name:"to_diff_dict",anchor:"transformers.PretrainedConfig.to_diff_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L739",returnDescription:` <p>Dictionary of all the attributes that make up this configuration instance,</p> `,returnType:` <p><code>Dict[str, Any]</code></p> `}}),Ne=new T({props:{name:"to_json_file",anchor:"transformers.PretrainedConfig.to_json_file",parameters:[{name:"json_file_path",val:": typing.Union[str, os.PathLike]"},{name:"use_diff",val:": bool = True"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.to_json_file.json_file_path",description:`<strong>json_file_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file in which this configuration instance&#x2019;s parameters will be saved.`,name:"json_file_path"},{anchor:"transformers.PretrainedConfig.to_json_file.use_diff",description:`<strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON file.`,name:"use_diff"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L811"}}),We=new T({props:{name:"to_json_string",anchor:"transformers.PretrainedConfig.to_json_string",parameters:[{name:"use_diff",val:": bool = True"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.to_json_string.use_diff",description:`<strong>use_diff</strong> (<code>bool</code>, 
<em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON string.`,name:"use_diff"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L793",returnDescription:` <p>String containing all the attributes that make up this configuration instance in JSON format.</p> `,returnType:` <p><code>str</code></p> `}}),Be=new T({props:{name:"update",anchor:"transformers.PretrainedConfig.update",parameters:[{name:"config_dict",val:": typing.Dict[str, typing.Any]"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.update.config_dict",description:"<strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary of attributes that should be updated for this class.",name:"config_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L825"}}),Ve=new T({props:{name:"update_from_string",anchor:"transformers.PretrainedConfig.update_from_string",parameters:[{name:"update_str",val:": str"}],parametersDescription:[{anchor:"transformers.PretrainedConfig.update_from_string.update_str",description:"<strong>update_str</strong> (<code>str</code>) &#x2014; String with attributes that should be updated for this class.",name:"update_str"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L835"}}),{c(){f=n("meta"),b=d(),p=n("h1"),g=n("a"),x=n("span"),y(l.$$.fragment),_=d(),O=n("span"),Do=o("Configuration"),ao=d(),G=n("p"),zo=o("The base class "),He=n("a"),jo=o("PretrainedConfig"),qo=o(` implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),so=d(),D=n("p"),Ao=o(`Each derived config class implements model specific attributes. Common attributes present in all config classes are: `),dt=n("code"),Oo=o("hidden_size"),Fo=o(", "),ct=n("code"),So=o("num_attention_heads"),Io=o(", and "),lt=n("code"),Lo=o("num_hidden_layers"),No=o(`. Text models further implement: `),ft=n("code"),Wo=o("vocab_size"),Bo=o("."),io=d(),R=n("h2"),H=n("a"),mt=n("span"),y($e.$$.fragment),Mo=d(),ht=n("span"),Vo=o("PretrainedConfig"),co=d(),m=n("div"),y(Pe.$$.fragment),Ro=d(),pt=n("p"),Uo=o(`Base class for all configuration classes. Handles a few parameters common to all models\u2019 configurations as well as methods for loading/downloading/saving configurations.`),Jo=d(),y(X.$$.fragment),Go=d(),gt=n("p"),Ho=o("Class attributes (overridden by derived classes):"),Xo=d(),F=n("ul"),W=n("li"),ut=n("strong"),Yo=o("model_type"),Ko=o(" ("),_t=n("code"),Qo=o("str"),Zo=o(`) \u2014 An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in `),Xe=n("a"),er=o("AutoConfig"),tr=o("."),or=d(),z=n("li"),bt=n("strong"),rr=o("is_composition"),nr=o(" ("),vt=n("code"),ar=o("bool"),sr=o(`) \u2014 Whether the config class is composed of multiple sub-configs. 
In this case the config has to be initialized from two or more configs of type `),Ye=n("a"),ir=o("PretrainedConfig"),dr=o(` like: `),Ke=n("a"),cr=o("EncoderDecoderConfig"),lr=o(" or "),Qe=n("a"),fr=o("~RagConfig"),mr=o("."),hr=d(),Y=n("li"),yt=n("strong"),pr=o("keys_to_ignore_at_inference"),gr=o(" ("),wt=n("code"),ur=o("List[str]"),_r=o(`) \u2014 A list of keys to ignore by default when looking at dictionary outputs of the model during inference.`),br=d(),K=n("li"),$t=n("strong"),vr=o("attribute_map"),yr=o(" ("),Pt=n("code"),wr=o("Dict[str, str]"),$r=o(`) \u2014 A dict that maps model specific attribute names to the standardized naming of attributes.`),Pr=d(),Ct=n("p"),Cr=o("Common attributes (present in all subclasses):"),kr=d(),S=n("ul"),Q=n("li"),kt=n("strong"),xr=o("vocab_size"),Er=o(" ("),xt=n("code"),Tr=o("int"),Dr=o(`) \u2014 The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don\u2019t have a text modality like ViT).`),zr=d(),Z=n("li"),Et=n("strong"),jr=o("hidden_size"),qr=o(" ("),Tt=n("code"),Ar=o("int"),Or=o(") \u2014 The hidden size of the model."),Fr=d(),ee=n("li"),Dt=n("strong"),Sr=o("num_attention_heads"),Ir=o(" ("),zt=n("code"),Lr=o("int"),Nr=o(`) \u2014 The number of attention heads used in the multi-head attention layers of the model.`),Wr=d(),te=n("li"),jt=n("strong"),Br=o("num_hidden_layers"),Mr=o(" ("),qt=n("code"),Vr=o("int"),Rr=o(") \u2014 The number of blocks in the model."),Ur=d(),B=n("div"),y(Ce.$$.fragment),Jr=d(),ke=n("p"),Gr=o(`Upload the configuration file to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),At=n("code"),Hr=o("repo_path_or_name"),Xr=o("."),Yr=d(),y(oe.$$.fragment),Kr=d(),re=n("div"),y(xe.$$.fragment),Qr=d(),I=n("p"),Zr=o("Checks whether the passed dictionary and its nested dicts have a "),Ot=n("em"),en=o("torch_dtype"),tn=o(` key and if it\u2019s not None, converts torch.dtype to a string of just the type. For example, `),Ft=n("code"),on=o("torch.float32"),rn=o(" get converted into "),St=n("em"),nn=o("\u201Cfloat32\u201D"),an=o(` string, which can then be stored in the json format.`),sn=d(),ne=n("div"),y(Ee.$$.fragment),dn=d(),Te=n("p"),cn=o("Instantiates a "),Ze=n("a"),ln=o("PretrainedConfig"),fn=o(" from a Python dictionary of parameters."),mn=d(),ae=n("div"),y(De.$$.fragment),hn=d(),ze=n("p"),pn=o("Instantiates a "),et=n("a"),gn=o("PretrainedConfig"),un=o(" from the path to a JSON file of parameters."),_n=d(),q=n("div"),y(je.$$.fragment),bn=d(),qe=n("p"),vn=o("Instantiate a "),tt=n("a"),yn=o("PretrainedConfig"),wn=o(" (or a derived class) from a pretrained model configuration."),$n=d(),y(se.$$.fragment),Pn=d(),y(ie.$$.fragment),Cn=d(),de=n("div"),y(Ae.$$.fragment),kn=d(),L=n("p"),xn=o("From a "),It=n("code"),En=o("pretrained_model_name_or_path"),Tn=o(`, resolve to a dictionary of parameters, to be used for instantiating a `),ot=n("a"),Dn=o("PretrainedConfig"),zn=o(" using "),Lt=n("code"),jn=o("from_dict"),qn=o("."),An=d(),M=n("div"),y(Oe.$$.fragment),On=d(),Fe=n("p"),Fn=o(`Register this class with a given auto class. 
This should only be used for custom configurations as the ones in the library are already mapped with `),Nt=n("code"),Sn=o("AutoConfig"),In=o("."),Ln=d(),y(ce.$$.fragment),Nn=d(),le=n("div"),y(Se.$$.fragment),Wn=d(),U=n("p"),Bn=o("Save a configuration object to the directory "),Wt=n("code"),Mn=o("save_directory"),Vn=o(`, so that it can be re-loaded using the `),rt=n("a"),Rn=o("from_pretrained()"),Un=o(" class method."),Jn=d(),fe=n("div"),y(Ie.$$.fragment),Gn=d(),Bt=n("p"),Hn=o("Serializes this instance to a Python dictionary."),Xn=d(),me=n("div"),y(Le.$$.fragment),Yn=d(),Mt=n("p"),Kn=o(`Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.`),Qn=d(),he=n("div"),y(Ne.$$.fragment),Zn=d(),Vt=n("p"),ea=o("Save this instance to a JSON file."),ta=d(),pe=n("div"),y(We.$$.fragment),oa=d(),Rt=n("p"),ra=o("Serializes this instance to a JSON string."),na=d(),ge=n("div"),y(Be.$$.fragment),aa=d(),Me=n("p"),sa=o("Updates attributes of this class with attributes from "),Ut=n("code"),ia=o("config_dict"),da=o("."),ca=d(),A=n("div"),y(Ve.$$.fragment),la=d(),Re=n("p"),fa=o("Updates attributes of this class with attributes from "),Jt=n("code"),ma=o("update_str"),ha=o("."),pa=d(),J=n("p"),ga=o("The expected format is ints, floats and strings as is, and for booleans use "),Gt=n("code"),ua=o("true"),_a=o(" or "),Ht=n("code"),ba=o("false"),va=o(`. For example: \u201Cn_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\u201D`),ya=d(),Xt=n("p"),wa=o("The keys to change have to already exist in the config object."),this.h()},l(i){const v=Ds('[data-svelte="svelte-1phssyn"]',document.head);f=a(v,"META",{name:!0,content:!0}),v.forEach(t),b=c(i),p=a(i,"H1",{class:!0});var Ue=s(p);g=a(Ue,"A",{id:!0,class:!0,href:!0});var Yt=s(g);x=a(Yt,"SPAN",{});var Kt=s(x);w(l.$$.fragment,Kt),Kt.forEach(t),Yt.forEach(t),_=c(Ue),O=a(Ue,"SPAN",{});var Qt=s(O);Do=r(Qt,"Configuration"),Qt.forEach(t),Ue.forEach(t),ao=c(i),G=a(i,"P",{});var Je=s(G);zo=r(Je,"The base class "),He=a(Je,"A",{href:!0});var Pa=s(He);jo=r(Pa,"PretrainedConfig"),Pa.forEach(t),qo=r(Je,` implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),Je.forEach(t),so=c(i),D=a(i,"P",{});var V=s(D);Ao=r(V,`Each derived config class implements model specific attributes. Common attributes present in all config classes are: `),dt=a(V,"CODE",{});var Ca=s(dt);Oo=r(Ca,"hidden_size"),Ca.forEach(t),Fo=r(V,", "),ct=a(V,"CODE",{});var ka=s(ct);So=r(ka,"num_attention_heads"),ka.forEach(t),Io=r(V,", and "),lt=a(V,"CODE",{});var xa=s(lt);Lo=r(xa,"num_hidden_layers"),xa.forEach(t),No=r(V,`. Text models further implement: `),ft=a(V,"CODE",{});var Ea=s(ft);Wo=r(Ea,"vocab_size"),Ea.forEach(t),Bo=r(V,"."),V.forEach(t),io=c(i),R=a(i,"H2",{class:!0});var fo=s(R);H=a(fo,"A",{id:!0,class:!0,href:!0});var Ta=s(H);mt=a(Ta,"SPAN",{});var Da=s(mt);w($e.$$.fragment,Da),Da.forEach(t),Ta.forEach(t),Mo=c(fo),ht=a(fo,"SPAN",{});var za=s(ht);Vo=r(za,"PretrainedConfig"),za.forEach(t),fo.forEach(t),co=c(i),m=a(i,"DIV",{class:!0});var h=s(m);w(Pe.$$.fragment,h),Ro=c(h),pt=a(h,"P",{});var ja=s(pt);Uo=r(ja,`Base class for all configuration classes. 
Handles a few parameters common to all models\u2019 configurations as well as methods for loading/downloading/saving configurations.`),ja.forEach(t),Jo=c(h),w(X.$$.fragment,h),Go=c(h),gt=a(h,"P",{});var qa=s(gt);Ho=r(qa,"Class attributes (overridden by derived classes):"),qa.forEach(t),Xo=c(h),F=a(h,"UL",{});var ue=s(F);W=a(ue,"LI",{});var Ge=s(W);ut=a(Ge,"STRONG",{});var Aa=s(ut);Yo=r(Aa,"model_type"),Aa.forEach(t),Ko=r(Ge," ("),_t=a(Ge,"CODE",{});var Oa=s(_t);Qo=r(Oa,"str"),Oa.forEach(t),Zo=r(Ge,`) \u2014 An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in `),Xe=a(Ge,"A",{href:!0});var Fa=s(Xe);er=r(Fa,"AutoConfig"),Fa.forEach(t),tr=r(Ge,"."),Ge.forEach(t),or=c(ue),z=a(ue,"LI",{});var N=s(z);bt=a(N,"STRONG",{});var Sa=s(bt);rr=r(Sa,"is_composition"),Sa.forEach(t),nr=r(N," ("),vt=a(N,"CODE",{});var Ia=s(vt);ar=r(Ia,"bool"),Ia.forEach(t),sr=r(N,`) \u2014 Whether the config class is composed of multiple sub-configs. In this case the config has to be initialized from two or more configs of type `),Ye=a(N,"A",{href:!0});var La=s(Ye);ir=r(La,"PretrainedConfig"),La.forEach(t),dr=r(N,` like: `),Ke=a(N,"A",{href:!0});var Na=s(Ke);cr=r(Na,"EncoderDecoderConfig"),Na.forEach(t),lr=r(N," or "),Qe=a(N,"A",{href:!0});var Wa=s(Qe);fr=r(Wa,"~RagConfig"),Wa.forEach(t),mr=r(N,"."),N.forEach(t),hr=c(ue),Y=a(ue,"LI",{});var Zt=s(Y);yt=a(Zt,"STRONG",{});var Ba=s(yt);pr=r(Ba,"keys_to_ignore_at_inference"),Ba.forEach(t),gr=r(Zt," ("),wt=a(Zt,"CODE",{});var Ma=s(wt);ur=r(Ma,"List[str]"),Ma.forEach(t),_r=r(Zt,`) \u2014 A list of keys to ignore by default when looking at dictionary outputs of the model during inference.`),Zt.forEach(t),br=c(ue),K=a(ue,"LI",{});var eo=s(K);$t=a(eo,"STRONG",{});var Va=s($t);vr=r(Va,"attribute_map"),Va.forEach(t),yr=r(eo," ("),Pt=a(eo,"CODE",{});var Ra=s(Pt);wr=r(Ra,"Dict[str, str]"),Ra.forEach(t),$r=r(eo,`) \u2014 A dict that maps model specific attribute names to the standardized naming of attributes.`),eo.forEach(t),ue.forEach(t),Pr=c(h),Ct=a(h,"P",{});var Ua=s(Ct);Cr=r(Ua,"Common attributes (present in all subclasses):"),Ua.forEach(t),kr=c(h),S=a(h,"UL",{});var _e=s(S);Q=a(_e,"LI",{});var to=s(Q);kt=a(to,"STRONG",{});var Ja=s(kt);xr=r(Ja,"vocab_size"),Ja.forEach(t),Er=r(to," ("),xt=a(to,"CODE",{});var Ga=s(xt);Tr=r(Ga,"int"),Ga.forEach(t),Dr=r(to,`) \u2014 The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don\u2019t have a text modality like ViT).`),to.forEach(t),zr=c(_e),Z=a(_e,"LI",{});var oo=s(Z);Et=a(oo,"STRONG",{});var Ha=s(Et);jr=r(Ha,"hidden_size"),Ha.forEach(t),qr=r(oo," ("),Tt=a(oo,"CODE",{});var Xa=s(Tt);Ar=r(Xa,"int"),Xa.forEach(t),Or=r(oo,") \u2014 The hidden size of the model."),oo.forEach(t),Fr=c(_e),ee=a(_e,"LI",{});var ro=s(ee);Dt=a(ro,"STRONG",{});var Ya=s(Dt);Sr=r(Ya,"num_attention_heads"),Ya.forEach(t),Ir=r(ro," ("),zt=a(ro,"CODE",{});var Ka=s(zt);Lr=r(Ka,"int"),Ka.forEach(t),Nr=r(ro,`) \u2014 The number of attention heads used in the multi-head attention layers of the model.`),ro.forEach(t),Wr=c(_e),te=a(_e,"LI",{});var no=s(te);jt=a(no,"STRONG",{});var Qa=s(jt);Br=r(Qa,"num_hidden_layers"),Qa.forEach(t),Mr=r(no," ("),qt=a(no,"CODE",{});var Za=s(qt);Vr=r(Za,"int"),Za.forEach(t),Rr=r(no,") \u2014 The number of blocks in the model."),no.forEach(t),_e.forEach(t),Ur=c(h),B=a(h,"DIV",{class:!0});var nt=s(B);w(Ce.$$.fragment,nt),Jr=c(nt),ke=a(nt,"P",{});var mo=s(ke);Gr=r(mo,`Upload the configuration file to the 
\u{1F917} Model Hub while synchronizing a local clone of the repo in `),At=a(mo,"CODE",{});var es=s(At);Hr=r(es,"repo_path_or_name"),es.forEach(t),Xr=r(mo,"."),mo.forEach(t),Yr=c(nt),w(oe.$$.fragment,nt),nt.forEach(t),Kr=c(h),re=a(h,"DIV",{class:!0});var ho=s(re);w(xe.$$.fragment,ho),Qr=c(ho),I=a(ho,"P",{});var be=s(I);Zr=r(be,"Checks whether the passed dictionary and its nested dicts have a "),Ot=a(be,"EM",{});var ts=s(Ot);en=r(ts,"torch_dtype"),ts.forEach(t),tn=r(be,` key and if it\u2019s not None, converts torch.dtype to a string of just the type. For example, `),Ft=a(be,"CODE",{});var os=s(Ft);on=r(os,"torch.float32"),os.forEach(t),rn=r(be," get converted into "),St=a(be,"EM",{});var rs=s(St);nn=r(rs,"\u201Cfloat32\u201D"),rs.forEach(t),an=r(be,` string, which can then be stored in the json format.`),be.forEach(t),ho.forEach(t),sn=c(h),ne=a(h,"DIV",{class:!0});var po=s(ne);w(Ee.$$.fragment,po),dn=c(po),Te=a(po,"P",{});var go=s(Te);cn=r(go,"Instantiates a "),Ze=a(go,"A",{href:!0});var ns=s(Ze);ln=r(ns,"PretrainedConfig"),ns.forEach(t),fn=r(go," from a Python dictionary of parameters."),go.forEach(t),po.forEach(t),mn=c(h),ae=a(h,"DIV",{class:!0});var uo=s(ae);w(De.$$.fragment,uo),hn=c(uo),ze=a(uo,"P",{});var _o=s(ze);pn=r(_o,"Instantiates a "),et=a(_o,"A",{href:!0});var as=s(et);gn=r(as,"PretrainedConfig"),as.forEach(t),un=r(_o," from the path to a JSON file of parameters."),_o.forEach(t),uo.forEach(t),_n=c(h),q=a(h,"DIV",{class:!0});var ve=s(q);w(je.$$.fragment,ve),bn=c(ve),qe=a(ve,"P",{});var bo=s(qe);vn=r(bo,"Instantiate a "),tt=a(bo,"A",{href:!0});var ss=s(tt);yn=r(ss,"PretrainedConfig"),ss.forEach(t),wn=r(bo," (or a derived class) from a pretrained model configuration."),bo.forEach(t),$n=c(ve),w(se.$$.fragment,ve),Pn=c(ve),w(ie.$$.fragment,ve),ve.forEach(t),Cn=c(h),de=a(h,"DIV",{class:!0});var vo=s(de);w(Ae.$$.fragment,vo),kn=c(vo),L=a(vo,"P",{});var ye=s(L);xn=r(ye,"From a "),It=a(ye,"CODE",{});var is=s(It);En=r(is,"pretrained_model_name_or_path"),is.forEach(t),Tn=r(ye,`, resolve to a dictionary of parameters, to be used for instantiating a `),ot=a(ye,"A",{href:!0});var ds=s(ot);Dn=r(ds,"PretrainedConfig"),ds.forEach(t),zn=r(ye," using "),Lt=a(ye,"CODE",{});var cs=s(Lt);jn=r(cs,"from_dict"),cs.forEach(t),qn=r(ye,"."),ye.forEach(t),vo.forEach(t),An=c(h),M=a(h,"DIV",{class:!0});var at=s(M);w(Oe.$$.fragment,at),On=c(at),Fe=a(at,"P",{});var yo=s(Fe);Fn=r(yo,`Register this class with a given auto class. 
This should only be used for custom configurations as the ones in the library are already mapped with `),Nt=a(yo,"CODE",{});var ls=s(Nt);Sn=r(ls,"AutoConfig"),ls.forEach(t),In=r(yo,"."),yo.forEach(t),Ln=c(at),w(ce.$$.fragment,at),at.forEach(t),Nn=c(h),le=a(h,"DIV",{class:!0});var wo=s(le);w(Se.$$.fragment,wo),Wn=c(wo),U=a(wo,"P",{});var st=s(U);Bn=r(st,"Save a configuration object to the directory "),Wt=a(st,"CODE",{});var fs=s(Wt);Mn=r(fs,"save_directory"),fs.forEach(t),Vn=r(st,`, so that it can be re-loaded using the `),rt=a(st,"A",{href:!0});var ms=s(rt);Rn=r(ms,"from_pretrained()"),ms.forEach(t),Un=r(st," class method."),st.forEach(t),wo.forEach(t),Jn=c(h),fe=a(h,"DIV",{class:!0});var $o=s(fe);w(Ie.$$.fragment,$o),Gn=c($o),Bt=a($o,"P",{});var hs=s(Bt);Hn=r(hs,"Serializes this instance to a Python dictionary."),hs.forEach(t),$o.forEach(t),Xn=c(h),me=a(h,"DIV",{class:!0});var Po=s(me);w(Le.$$.fragment,Po),Yn=c(Po),Mt=a(Po,"P",{});var ps=s(Mt);Kn=r(ps,`Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.`),ps.forEach(t),Po.forEach(t),Qn=c(h),he=a(h,"DIV",{class:!0});var Co=s(he);w(Ne.$$.fragment,Co),Zn=c(Co),Vt=a(Co,"P",{});var gs=s(Vt);ea=r(gs,"Save this instance to a JSON file."),gs.forEach(t),Co.forEach(t),ta=c(h),pe=a(h,"DIV",{class:!0});var ko=s(pe);w(We.$$.fragment,ko),oa=c(ko),Rt=a(ko,"P",{});var us=s(Rt);ra=r(us,"Serializes this instance to a JSON string."),us.forEach(t),ko.forEach(t),na=c(h),ge=a(h,"DIV",{class:!0});var xo=s(ge);w(Be.$$.fragment,xo),aa=c(xo),Me=a(xo,"P",{});var Eo=s(Me);sa=r(Eo,"Updates attributes of this class with attributes from "),Ut=a(Eo,"CODE",{});var _s=s(Ut);ia=r(_s,"config_dict"),_s.forEach(t),da=r(Eo,"."),Eo.forEach(t),xo.forEach(t),ca=c(h),A=a(h,"DIV",{class:!0});var we=s(A);w(Ve.$$.fragment,we),la=c(we),Re=a(we,"P",{});var To=s(Re);fa=r(To,"Updates attributes of this class with attributes from "),Jt=a(To,"CODE",{});var bs=s(Jt);ma=r(bs,"update_str"),bs.forEach(t),ha=r(To,"."),To.forEach(t),pa=c(we),J=a(we,"P",{});var it=s(J);ga=r(it,"The expected format is ints, floats and strings as is, and for booleans use "),Gt=a(it,"CODE",{});var vs=s(Gt);ua=r(vs,"true"),vs.forEach(t),_a=r(it," or "),Ht=a(it,"CODE",{});var ys=s(Ht);ba=r(ys,"false"),ys.forEach(t),va=r(it,`. 
For example: \u201Cn_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\u201D`),it.forEach(t),ya=c(we),Xt=a(we,"P",{});var ws=s(Xt);wa=r(ws,"The keys to change have to already exist in the config object."),ws.forEach(t),we.forEach(t),h.forEach(t),this.h()},h(){u(f,"name","hf:doc:metadata"),u(f,"content",JSON.stringify(Is)),u(g,"id","configuration"),u(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(g,"href","#configuration"),u(p,"class","relative group"),u(He,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(H,"id","transformers.PretrainedConfig"),u(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(H,"href","#transformers.PretrainedConfig"),u(R,"class","relative group"),u(Xe,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoConfig"),u(Ye,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(Ke,"href","/docs/transformers/pr_19429/en/model_doc/encoder-decoder#transformers.EncoderDecoderConfig"),u(Qe,"href","/docs/transformers/pr_19429/en/model_doc/rag#transformers.RagConfig"),u(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(Ze,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(et,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(tt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ot,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig"),u(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(rt,"href","/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained"),u(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(m,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(i,v){e(document.head,f),E(i,b,v),E(i,p,v),e(p,g),e(g,x),$(l,x,null),e(p,_),e(p,O),e(O,Do),E(i,ao,v),E(i,G,v),e(G,zo),e(G,He),e(He,jo),e(G,qo),E(i,so,v),E(i,D,v),e(D,Ao),e(D,dt),e(dt,Oo),e(D,Fo),e(D,ct),e(ct,So),e(D,Io),e(D,lt),e(lt,Lo),e(D,No),e(D,ft),e(ft,Wo),e(D,Bo),E(i,io,v),E(i,R,v),e(R,H),e(H,mt),$($e,mt,null),e(R,Mo),e(R,ht),e(ht,Vo),E(i,co,v),E(i,m,v),$(Pe,m,null),e(m,Ro),e(m,pt),e(pt,Uo),e(m,Jo),$(X,m,null),e(m,Go),e(m,gt),e(gt,Ho),e(m,Xo),e(m,F),e(F,W),e(W,ut),e(ut,Yo),e(W,Ko),e(W,_t),e(_t,Qo),e(W,Zo),e(W,Xe),e(Xe,er),e(W,tr),e(F,or),e(F,z),e(z,bt),e(bt,rr),e(z,nr),e(z,vt),e(vt,ar),e(z,sr),e(z,Ye),e(Ye,ir),e(z,dr),e(z,Ke),e(Ke,cr),e(z,lr),e(z,Qe),e(Qe,fr),e(z,mr),e(F,hr),e(F,Y),e(Y,yt),e(yt,pr),e(Y,gr),e(Y,wt),e(wt,ur),e(Y,_r),e(F,br),e(F,K),e(K,$t),e($t,vr),e(K,yr),e(K,Pt),e(Pt,wr),e(K,$r),e(m,Pr),e(m,Ct),e(Ct,Cr),e(m,kr),e(m,S),e(S,Q),e(Q,kt),e(kt,xr),e(Q,Er),e(Q,xt),e(xt,Tr),e(Q,Dr),e(S,zr),e(S,Z),e(Z,Et),e(Et,jr),e(Z,qr),e(Z,Tt),e(Tt,Ar),e(Z,Or),e(S,Fr),e(S,ee),e(ee,Dt),e(Dt,Sr),e(ee,Ir),e(ee,zt),e(zt,Lr),e(ee,Nr),e(S,Wr),e(S,te),e(te,jt),e(jt,Br),e(te,Mr),e(te,qt),e(qt,Vr),e(te,Rr),e(m,Ur),e(m,B),$(Ce,B,null),e(B,Jr),e(B,ke),e(ke,Gr),e(ke,At),e(At,Hr),e(ke,Xr),e(B,Yr),$(oe,B,null),e(m,Kr),e(m,re),$(xe,re,null),e(re,Qr),e(re,I),e(I,Zr),e(I,Ot),e(Ot,en),e(I,tn),e(I,Ft),e(Ft,on),e(I,rn),e(I,St),e(St,nn),e(I,an),e(m,sn),e(m,ne),$(Ee,ne,null),e(ne,dn),e(ne,Te),e(Te,cn),e(Te,Ze),e(Ze,ln),e(Te,fn),e(m,mn),e(m,ae),$(De,ae,null),e(ae,hn),e(ae,ze),e(ze,pn),e(ze,et),e(et,gn),e(ze,un),e(m,_n),e(m,q),$(je,q,null),e(q,bn),e(q,qe),e(qe,vn),e(qe,tt),e(tt,yn),e(qe,wn),e(q,$n),$(se,q,null),e(q,Pn),$(ie,q,null),e(m,Cn),e(m,de),$(Ae,de,null),e(de,kn),e(de,L),e(L,xn),e(L,It),e(It,En),e(L,Tn),e(L,ot),e(ot,Dn),e(L,zn),e(L,Lt),e(Lt,jn),e(L,qn),e(m,An),e(m,M),$(Oe,M,null),e(M,On),e(M,Fe),e(Fe,Fn),e(Fe,Nt),e(Nt,Sn),e(Fe,In),e(M,Ln),$(ce,M,null),e(m,Nn),e(m,le),$(Se,le,null),e(le,Wn),e(le,U),e(U,Bn),e(U,Wt),e(Wt,Mn),e(U,Vn),e(U,rt),e(rt,Rn),e(U,Un),e(m,Jn),e(m,fe),$(Ie,fe,null),e(fe,Gn),e(fe,Bt),e(Bt,Hn),e(m,Xn),e(m,me),$(Le,me,null),e(me,Yn),e(me,Mt),e(Mt,Kn),e(m,Qn),e(m,he),$(Ne,he,null),e(he,Zn),e(he,Vt),e(Vt,ea),e(m,ta),e(m,pe),$(We,pe,null),e(pe,oa),e(pe,Rt),e(Rt,ra),e(m,na),e(m,ge),$(Be,ge,null),e(ge,aa),e(ge,Me),e(Me,sa),e(Me,Ut),e(Ut,ia),e(Me,da),e(m,ca),e(m,A),$(Ve,A,null),e(A,la),e(A,Re),e(Re,fa),e(Re,Jt),e(Jt,ma),e(Re,ha),e(A,pa),e(A,J),e(J,ga),e(J,Gt),e(Gt,ua),e(J,_a),e(J,Ht),e(Ht,ba),e(J,va),e(A,ya),e(A,Xt),e(Xt,wa),lo=!0},p(i,[v]){const Ue={};v&2&&(Ue.$$scope={dirty:v,ctx:i}),X.$set(Ue);const Yt={};v&2&&(Yt.$$scope={dirty:v,ctx:i}),oe.$set(Yt);const Kt={};v&2&&(Kt.$$scope={dirty:v,ctx:i}),se.$set(Kt);const Qt={};v&2&&(Qt.$$scope={dirty:v,ctx:i}),ie.$set(Qt);const 
Je={};v&2&&(Je.$$scope={dirty:v,ctx:i}),ce.$set(Je)},i(i){lo||(P(l.$$.fragment,i),P($e.$$.fragment,i),P(Pe.$$.fragment,i),P(X.$$.fragment,i),P(Ce.$$.fragment,i),P(oe.$$.fragment,i),P(xe.$$.fragment,i),P(Ee.$$.fragment,i),P(De.$$.fragment,i),P(je.$$.fragment,i),P(se.$$.fragment,i),P(ie.$$.fragment,i),P(Ae.$$.fragment,i),P(Oe.$$.fragment,i),P(ce.$$.fragment,i),P(Se.$$.fragment,i),P(Ie.$$.fragment,i),P(Le.$$.fragment,i),P(Ne.$$.fragment,i),P(We.$$.fragment,i),P(Be.$$.fragment,i),P(Ve.$$.fragment,i),lo=!0)},o(i){C(l.$$.fragment,i),C($e.$$.fragment,i),C(Pe.$$.fragment,i),C(X.$$.fragment,i),C(Ce.$$.fragment,i),C(oe.$$.fragment,i),C(xe.$$.fragment,i),C(Ee.$$.fragment,i),C(De.$$.fragment,i),C(je.$$.fragment,i),C(se.$$.fragment,i),C(ie.$$.fragment,i),C(Ae.$$.fragment,i),C(Oe.$$.fragment,i),C(ce.$$.fragment,i),C(Se.$$.fragment,i),C(Ie.$$.fragment,i),C(Le.$$.fragment,i),C(Ne.$$.fragment,i),C(We.$$.fragment,i),C(Be.$$.fragment,i),C(Ve.$$.fragment,i),lo=!1},d(i){t(f),i&&t(b),i&&t(p),k(l),i&&t(ao),i&&t(G),i&&t(so),i&&t(D),i&&t(io),i&&t(R),k($e),i&&t(co),i&&t(m),k(Pe),k(X),k(Ce),k(oe),k(xe),k(Ee),k(De),k(je),k(se),k(ie),k(Ae),k(Oe),k(ce),k(Se),k(Ie),k(Le),k(Ne),k(We),k(Be),k(Ve)}}}const Is={local:"configuration",sections:[{local:"transformers.PretrainedConfig",title:"PretrainedConfig"}],title:"Configuration"};function Ls(j){return zs(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Us extends xs{constructor(f){super();Es(this,f,Ls,Ss,Ts,{})}}export{Us as default,Is as metadata};
14
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/tokenizer.mdx-hf-doc-builder.js
import{S as cv,i as lv,s as hv,e as r,k as d,w as h,t as o,M as pv,c as s,d as t,m as c,a,x as p,h as n,b as l,G as e,g as $,y as m,q as f,o as u,B as _,v as mv,L as iv}from"../../chunks/vendor-hf-doc-builder.js";import{T as sv}from"../../chunks/Tip-hf-doc-builder.js";import{D as T}from"../../chunks/Docstring-hf-doc-builder.js";import{C as dv}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Gi}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as av}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function fv(pe){let y,L,q,E,B;return E=new dv({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert". tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to an organization with the name "my-finetuned-bert". tokenizer.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){y=r("p"),L=o("Examples:"),q=d(),h(E.$$.fragment)},l(v){y=s(v,"P",{});var I=a(y);L=n(I,"Examples:"),I.forEach(t),q=c(v),p(E.$$.fragment,v)},m(v,I){$(v,y,I),e(y,L),$(v,q,I),m(E,v,I),B=!0},p:iv,i(v){B||(f(E.$$.fragment,v),B=!0)},o(v){u(E.$$.fragment,v),B=!1},d(v){v&&t(y),v&&t(q),_(E,v)}}}function uv(pe){let y,L;return{c(){y=r("p"),L=o(`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`)},l(q){y=s(q,"P",{});var E=a(y);L=n(E,`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`),E.forEach(t)},m(q,E){$(q,y,E),e(y,L)},d(q){q&&t(y)}}}function _v(pe){let y,L,q,E,B;return E=new dv({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert". tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to an organization with the name "my-finetuned-bert". 
tokenizer.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){y=r("p"),L=o("Examples:"),q=d(),h(E.$$.fragment)},l(v){y=s(v,"P",{});var I=a(y);L=n(I,"Examples:"),I.forEach(t),q=c(v),p(E.$$.fragment,v)},m(v,I){$(v,y,I),e(y,L),$(v,q,I),m(E,v,I),B=!0},p:iv,i(v){B||(f(E.$$.fragment,v),B=!0)},o(v){u(E.$$.fragment,v),B=!1},d(v){v&&t(y),v&&t(q),_(E,v)}}}function gv(pe){let y,L;return{c(){y=r("p"),L=o(`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`)},l(q){y=s(q,"P",{});var E=a(y);L=n(E,`This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.`),E.forEach(t)},m(q,E){$(q,y,E),e(y,L)},d(q){q&&t(y)}}}function kv(pe){let y,L,q,E,B,v,I,Bn,ji,Ua,He,Hi,Tt,Mi,Xi,Va,Me,Wn,Yi,Ji,Rn,Ki,Ga,W,Qi,Oo,Zi,ed,So,td,od,Bo,nd,rd,Wo,sd,ad,ja,Ie,Ro,id,dd,Uo,cd,ld,Ha,me,Un,hd,pd,Vn,md,fd,Gn,ud,Ma,D,Vo,_d,gd,Go,kd,bd,jn,vd,Td,Hn,yd,wd,Mn,xd,zd,Xn,Ed,$d,Yn,Pd,qd,yt,Dd,Ld,Xa,Ne,Xe,Jn,wt,Fd,Kn,Id,Ya,g,xt,Nd,Qn,Ad,Cd,zt,Od,jo,Sd,Bd,Wd,Zn,Rd,Ud,er,Vd,Gd,tr,jd,Hd,N,fe,or,Md,Xd,nr,Yd,Jd,rr,Kd,Qd,Zd,R,sr,ec,tc,ar,oc,nc,ir,rc,sc,dr,ac,ic,cr,dc,cc,lc,Q,lr,hc,pc,hr,mc,fc,pr,uc,_c,mr,gc,kc,bc,U,fr,vc,Tc,ur,yc,wc,_r,xc,zc,gr,Ec,$c,Ho,Pc,qc,Dc,Ye,kr,Lc,Fc,br,Ic,Nc,Ac,Z,vr,Cc,Oc,Tr,Sc,Bc,yr,Wc,Rc,wr,Uc,Vc,Gc,ee,xr,jc,Hc,zr,Mc,Xc,Er,Yc,Jc,$r,Kc,Qc,Zc,Je,Et,el,Pr,tl,ol,Ke,$t,nl,qr,rl,sl,ue,Pt,al,Dr,il,dl,qt,cl,Lr,ll,hl,pl,_e,Dt,ml,Fr,fl,ul,Lt,_l,Ir,gl,kl,bl,ge,Ft,vl,It,Tl,Nr,yl,wl,xl,Qe,zl,Ze,Nt,El,Ar,$l,Pl,et,At,ql,Cr,Dl,Ll,tt,Ct,Fl,Or,Il,Nl,ke,Ot,Al,Sr,Cl,Ol,ot,Sl,be,St,Bl,Br,Wl,Rl,Ae,Ul,Wr,Vl,Gl,Rr,jl,Hl,Ml,ve,Bt,Xl,Ur,Yl,Jl,Vr,Kl,Ja,Ce,nt,Gr,Wt,Ql,jr,Zl,Ka,te,eh,Mo,th,oh,Rt,nh,rh,Xo,sh,ah,Qa,k,Ut,ih,Hr,dh,ch,Vt,lh,Yo,hh,ph,mh,Mr,fh,uh,Xr,_h,gh,Yr,kh,bh,A,Te,Jr,vh,Th,Kr,yh,wh,Qr,xh,zh,Eh,V,Zr,$h,Ph,es,qh,Dh,ts,Lh,Fh,os,Ih,Nh,ns,Ah,Ch,Oh,oe,rs,Sh,Bh,ss,Wh,Rh,as,Uh,Vh,is,Gh,jh,Hh,G,ds,Mh,Xh,cs,Yh,Jh,ls,Kh,Qh,hs,Zh,ep,Jo,tp,op,np,rt,ps,rp,sp,ms,ap,ip,dp,ne,fs,cp,lp,us,hp,pp,_s,mp,fp,gs,up,_p,gp,re,ks,kp,bp,bs,vp,Tp,vs,yp,wp,Ts,xp,zp,Ep,st,Gt,$p,ys,Pp,qp,at,jt,Dp,ws,Lp,Fp,ye,Ht,Ip,xs,Np,Ap,Mt,Cp,zs,Op,Sp,Bp,we,Xt,Wp,Es,Rp,Up,Yt,Vp,$s,Gp,jp,Hp,xe,Jt,Mp,Kt,Xp,Ps,Yp,Jp,Kp,it,Qp,dt,Qt,Zp,qs,em,tm,ct,Zt,om,Ds,nm,rm,lt,eo,sm,Ls,am,im,ze,to,dm,Fs,cm,lm,ht,hm,Ee,oo,pm,Is,mm,fm,Ns,um,_m,pt,no,gm,As,km,Za,Oe,mt,Cs,ro,bm,Os,vm,ei,w,so,Tm,ie,ym,ao,Ss,wm,xm,zm,Ko,Em,$m,Qo,Pm,qm,Dm,Bs,Lm,Fm,j,io,Im,Ws,Nm,Am,Rs,Cm,Om,co,Zo,Us,Sm,Bm,Wm,en,Vs,Rm,Um,Vm,Gs,Gm,jm,H,lo,Hm,js,Mm,Xm,Hs,Ym,Jm,ho,tn,Ms,Km,Qm,Zm,on,Xs,ef,tf,of,Ys,nf,rf,ft,po,sf,Js,af,df,$e,mo,cf,Ks,lf,hf,Se,nn,Qs,pf,mf,ff,rn,Zs,uf,_f,gf,sn,ea,kf,bf,vf,ut,fo,Tf,uo,yf,ta,wf,xf,zf,O,_o,Ef,oa,$f,Pf,go,qf,an,Df,Lf,Ff,ko,dn,na,If,Nf,Af,cn,ra,Cf,Of,Sf,sa,Bf,Wf,bo,ln,aa,Rf,Uf,Vf,hn,ia,Gf,jf,Hf,M,vo,Mf,Be,Xf,da,Yf,Jf,ca,Kf,Qf,Zf,la,eu,tu,To,pn,ha,ou,nu,ru,mn,pa,su,au,iu,ma,du,cu,X,yo,lu,fa,hu,pu,ua
,mu,fu,wo,fn,_a,uu,_u,gu,un,ga,ku,bu,vu,ka,Tu,yu,_t,xo,wu,ba,xu,zu,gt,zo,Eu,va,$u,Pu,S,Eo,qu,Ta,Du,Lu,ya,Fu,Iu,$o,wa,Nu,Au,xa,Cu,Ou,za,Su,Bu,Po,_n,Ea,Wu,Ru,Uu,gn,$a,Vu,Gu,ju,F,qo,Hu,Pa,Mu,Xu,Do,Yu,kn,Ju,Ku,Qu,Lo,bn,qa,Zu,e_,t_,vn,Da,o_,n_,r_,La,s_,a_,Fo,Tn,Fa,i_,d_,c_,yn,Ia,l_,h_,p_,Na,m_,f_,kt,Io,u_,Aa,__,ti;return v=new Gi({}),wt=new Gi({}),xt=new T({props:{name:"class transformers.PreTrainedTokenizer",anchor:"transformers.PreTrainedTokenizer",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizer.padding_side",description:`<strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"padding_side"},{anchor:"transformers.PreTrainedTokenizer.truncation_side",description:`<strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"truncation_side"},{anchor:"transformers.PreTrainedTokenizer.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizer.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizer.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizer.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizer.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizer.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizer.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizer.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizer.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. 
Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L333"}}),Et=new T({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizer.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"text_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizer.__call__.text_target",description:`<strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_target"},{anchor:"transformers.PreTrainedTokenizer.__call__.text_pair_target",description:`<strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair_target"},{anchor:"transformers.PreTrainedTokenizer.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizer.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizer.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizer.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizer.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizer.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizer.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizer.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),$t=new T({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizer.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizer.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizer.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizer.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370",returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Pt=new T({props:{name:"decode",anchor:"transformers.PreTrainedTokenizer.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizer.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizer.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizer.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403",returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),Dt=new T({props:{name:"encode",anchor:"transformers.PreTrainedTokenizer.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, 
transformers.utils.generic.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizer.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizer.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizer.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizer.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizer.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizer.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizer.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizer.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220",returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),Ft=new T({props:{name:"push_to_hub",anchor:"transformers.PreTrainedTokenizer.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.PreTrainedTokenizer.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),Qe=new av({props:{anchor:"transformers.PreTrainedTokenizer.push_to_hub.example",$$slots:{default:[fv]},$$scope:{ctx:pe}}}),Nt=new T({props:{name:"convert_ids_to_tokens",anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens",parameters:[{name:"ids",val:": typing.Union[int, typing.List[int]]"},{name:"skip_special_tokens",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids",description:`<strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L883",returnDescription:` <p>The decoded token(s).</p> `,returnType:` <p><code>str</code> or <code>List[str]</code></p> `}}),At=new T({props:{name:"convert_tokens_to_ids",anchor:"transformers.PreTrainedTokenizer.convert_tokens_to_ids",parameters:[{name:"tokens",val:": typing.Union[str, typing.List[str]]"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens",description:"<strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).",name:"tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L560",returnDescription:` <p>The token id or list of token ids.</p> `,returnType:` <p><code>int</code> or <code>List[int]</code></p> `}}),Ct=new T({props:{name:"get_added_vocab",anchor:"transformers.PreTrainedTokenizer.get_added_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L369",returnDescription:` <p>The added tokens.</p> `,returnType:` <p><code>Dict[str, int]</code></p> `}}),Ot=new T({props:{name:"num_special_tokens_to_add",anchor:"transformers.PreTrainedTokenizer.num_special_tokens_to_add",parameters:[{name:"pair",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair",description:`<strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.`,name:"pair"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L458",returnDescription:` <p>Number of special tokens added to sequences.</p> `,returnType:` <p><code>int</code></p> `}}),ot=new sv({props:{$$slots:{default:[uv]},$$scope:{ctx:pe}}}),St=new 
T({props:{name:"prepare_for_tokenization",anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization",parameters:[{name:"text",val:": str"},{name:"is_split_into_words",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The text to prepare.`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. kwargs &#x2014; Keyword arguments to use for the tokenization.`,name:"is_split_into_words"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L821",returnDescription:` <p>The prepared text and the unused kwargs.</p> `,returnType:` <p><code>Tuple[str, Dict[str, Any]]</code></p> `}}),Bt=new T({props:{name:"tokenize",anchor:"transformers.PreTrainedTokenizer.tokenize",parameters:[{name:"text",val:": str"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizer.tokenize.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.`,name:"text"},{anchor:"transformers.PreTrainedTokenizer.tokenize.*kwargs",description:`*<strong>*kwargs</strong> (additional keyword arguments) &#x2014; Passed along to the model-specific <code>prepare_for_tokenization</code> preprocessing method.`,name:"*kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L481",returnDescription:` <p>The list of tokens.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Wt=new Gi({}),Ut=new T({props:{name:"class transformers.PreTrainedTokenizerFast",anchor:"transformers.PreTrainedTokenizerFast",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizerFast.padding_side",description:`<strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"padding_side"},{anchor:"transformers.PreTrainedTokenizerFast.truncation_side",description:`<strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. 
Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"truncation_side"},{anchor:"transformers.PreTrainedTokenizerFast.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizerFast.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizerFast.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizerFast.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizerFast.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizerFast.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizerFast.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizerFast.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizerFast.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. 
Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.tokenizer_object",description:`<strong>tokenizer_object</strong> (<a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a>) &#x2014; A <a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a> object from &#x1F917; tokenizers to instantiate from. See <a href="../fast_tokenizers">Using tokenizers from &#x1F917; tokenizers</a> for more information.`,name:"tokenizer_object"},{anchor:"transformers.PreTrainedTokenizerFast.tokenizer_file",description:`<strong>tokenizer_file</strong> (<code>str</code>) &#x2014; A path to a local JSON file representing a previously serialized <a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a> object from &#x1F917; tokenizers.`,name:"tokenizer_file"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L78"}}),Gt=new T({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerFast.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"text_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.text_target",description:`<strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_target"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.text_pair_target",description:`<strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair_target"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerFast.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when 
<code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),jt=new T({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerFast.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerFast.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerFast.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370",returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Ht=new T({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerFast.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerFast.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerFast.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403",returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),Xt=new T({props:{name:"encode",anchor:"transformers.PreTrainedTokenizerFast.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerFast.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerFast.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerFast.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerFast.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerFast.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerFast.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerFast.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerFast.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220",returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),Jt=new T({props:{name:"push_to_hub",anchor:"transformers.PreTrainedTokenizerFast.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),it=new av({props:{anchor:"transformers.PreTrainedTokenizerFast.push_to_hub.example",$$slots:{default:[_v]},$$scope:{ctx:pe}}}),Qt=new T({props:{name:"convert_ids_to_tokens",anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens",parameters:[{name:"ids",val:": typing.Union[int, typing.List[int]]"},{name:"skip_special_tokens",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids",description:`<strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L293",returnDescription:` <p>The decoded token(s).</p> `,returnType:` <p><code>str</code> or <code>List[str]</code></p> `}}),Zt=new T({props:{name:"convert_tokens_to_ids",anchor:"transformers.PreTrainedTokenizerFast.convert_tokens_to_ids",parameters:[{name:"tokens",val:": typing.Union[str, typing.List[str]]"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens",description:"<strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).",name:"tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L235",returnDescription:` <p>The token id or list of token ids.</p> `,returnType:` <p><code>int</code> or 
<code>List[int]</code></p> `}}),eo=new T({props:{name:"get_added_vocab",anchor:"transformers.PreTrainedTokenizerFast.get_added_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L156",returnDescription:` <p>The added tokens.</p> `,returnType:` <p><code>Dict[str, int]</code></p> `}}),to=new T({props:{name:"num_special_tokens_to_add",anchor:"transformers.PreTrainedTokenizerFast.num_special_tokens_to_add",parameters:[{name:"pair",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair",description:`<strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.`,name:"pair"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L272",returnDescription:` <p>Number of special tokens added to sequences.</p> `,returnType:` <p><code>int</code></p> `}}),ht=new sv({props:{$$slots:{default:[gv]},$$scope:{ctx:pe}}}),oo=new T({props:{name:"set_truncation_and_padding",anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding",parameters:[{name:"padding_strategy",val:": PaddingStrategy"},{name:"truncation_strategy",val:": TruncationStrategy"},{name:"max_length",val:": int"},{name:"stride",val:": int"},{name:"pad_to_multiple_of",val:": typing.Optional[int]"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy",description:`<strong>padding_strategy</strong> (<a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>) &#x2014; The kind of padding that will be applied to the input`,name:"padding_strategy"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy",description:`<strong>truncation_strategy</strong> (<a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>) &#x2014; The kind of truncation that will be applied to the input`,name:"truncation_strategy"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum size of a sequence.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride",description:`<strong>stride</strong> (<code>int</code>) &#x2014; The stride to use when handling overflow.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L322"}}),no=new T({props:{name:"train_new_from_iterator",anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator",parameters:[{name:"text_iterator",val:""},{name:"vocab_size",val:""},{name:"length",val:" = None"},{name:"new_special_tokens",val:" = None"},{name:"special_tokens_map",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator",description:`<strong>text_iterator</strong> (generator of <code>List[str]</code>) &#x2014; The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory.`,name:"text_iterator"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary you want for your tokenizer.`,name:"vocab_size"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.length",description:`<strong>length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The total number of sequences in the iterator. This is used to provide meaningful progress tracking`,name:"length"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens",description:`<strong>new_special_tokens</strong> (list of <code>str</code> or <code>AddedToken</code>, <em>optional</em>) &#x2014; A list of new special tokens to add to the tokenizer you are training.`,name:"new_special_tokens"},{anchor:"transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map",description:`<strong>special_tokens_map</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. 
kwargs &#x2014; Additional keyword arguments passed along to the trainer from the &#x1F917; Tokenizers library.`,name:"special_tokens_map"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L605",returnDescription:` <p>A new tokenizer of the same type as the original one, trained on <code>text_iterator</code>.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></p> `}}),ro=new Gi({}),so=new T({props:{name:"class transformers.BatchEncoding",anchor:"transformers.BatchEncoding",parameters:[{name:"data",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"encoding",val:": typing.Union[tokenizers.Encoding, typing.Sequence[tokenizers.Encoding], NoneType] = None"},{name:"tensor_type",val:": typing.Union[NoneType, str, transformers.utils.generic.TensorType] = None"},{name:"prepend_batch_axis",val:": bool = False"},{name:"n_sequences",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.BatchEncoding.data",description:`<strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the <code>__call__</code>/<code>encode_plus</code>/<code>batch_encode_plus</code> methods (&#x2018;input_ids&#x2019;, &#x2018;attention_mask&#x2019;, etc.).`,name:"data"},{anchor:"transformers.BatchEncoding.encoding",description:`<strong>encoding</strong> (<code>tokenizers.Encoding</code> or <code>Sequence[tokenizers.Encoding]</code>, <em>optional</em>) &#x2014; If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the <code>tokenizers.Encoding</code> instance or list of instance (for batches) hold this information.`,name:"encoding"},{anchor:"transformers.BatchEncoding.tensor_type",description:`<strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.`,name:"tensor_type"},{anchor:"transformers.BatchEncoding.prepend_batch_axis",description:`<strong>prepend_batch_axis</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add a batch axis when converting to tensors (see <code>tensor_type</code> above).`,name:"prepend_batch_axis"},{anchor:"transformers.BatchEncoding.n_sequences",description:`<strong>n_sequences</strong> (<code>Optional[int]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.`,name:"n_sequences"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L159"}}),io=new T({props:{name:"char_to_token",anchor:"transformers.BatchEncoding.char_to_token",parameters:[{name:"batch_or_char_index",val:": int"},{name:"char_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.char_to_token.batch_or_char_index",description:`<strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence`,name:"batch_or_char_index"},{anchor:"transformers.BatchEncoding.char_to_token.char_index",description:`<strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"char_index"},{anchor:"transformers.BatchEncoding.char_to_token.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.`,name:"sequence_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L531",returnDescription:` <p>Index of the token.</p> `,returnType:` <p><code>int</code></p> `}}),lo=new T({props:{name:"char_to_word",anchor:"transformers.BatchEncoding.char_to_word",parameters:[{name:"batch_or_char_index",val:": int"},{name:"char_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.char_to_word.batch_or_char_index",description:`<strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the original string.`,name:"batch_or_char_index"},{anchor:"transformers.BatchEncoding.char_to_word.char_index",description:`<strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the character in the original string.`,name:"char_index"},{anchor:"transformers.BatchEncoding.char_to_word.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.`,name:"sequence_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L617",returnDescription:` <p>Index or indices of the associated encoded token(s).</p> `,returnType:` <p><code>int</code> or <code>List[int]</code></p> `}}),po=new T({props:{name:"convert_to_tensors",anchor:"transformers.BatchEncoding.convert_to_tensors",parameters:[{name:"tensor_type",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"prepend_batch_axis",val:": bool = False"}],parametersDescription:[{anchor:"transformers.BatchEncoding.convert_to_tensors.tensor_type",description:`<strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>. 
If <code>None</code>, no modification is done.`,name:"tensor_type"},{anchor:"transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis",description:`<strong>prepend_batch_axis</strong> (<code>int</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the batch dimension during the conversion.`,name:"prepend_batch_axis"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L656"}}),mo=new T({props:{name:"sequence_ids",anchor:"transformers.BatchEncoding.sequence_ids",parameters:[{name:"batch_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.sequence_ids.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L297",returnDescription:` <p>A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding sequence.</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),fo=new T({props:{name:"to",anchor:"transformers.BatchEncoding.to",parameters:[{name:"device",val:": typing.Union[str, ForwardRef('torch.device')]"}],parametersDescription:[{anchor:"transformers.BatchEncoding.to.device",description:"<strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.",name:"device"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L741",returnDescription:` <p>The same instance after modification.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),_o=new T({props:{name:"token_to_chars",anchor:"transformers.BatchEncoding.token_to_chars",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_chars.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_chars.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token or tokens in the sequence.`,name:"token_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L492",returnDescription:` <p>Span of characters in the original string, or None, if the token (e.g. 
<s>, </s>) doesn\u2019t correspond to any chars in the origin string.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></p> `}}),vo=new T({props:{name:"token_to_sequence",anchor:"transformers.BatchEncoding.token_to_sequence",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_sequence.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_sequence.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.`,name:"token_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L364",returnDescription:` <p>Index of the word in the input sequence.</p> `,returnType:` <p><code>int</code></p> `}}),yo=new T({props:{name:"token_to_word",anchor:"transformers.BatchEncoding.token_to_word",parameters:[{name:"batch_or_token_index",val:": int"},{name:"token_index",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.BatchEncoding.token_to_word.batch_or_token_index",description:`<strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.`,name:"batch_or_token_index"},{anchor:"transformers.BatchEncoding.token_to_word.token_index",description:`<strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.`,name:"token_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L403",returnDescription:` <p>Index of the word in the input sequence.</p> `,returnType:` <p><code>int</code></p> `}}),xo=new T({props:{name:"tokens",anchor:"transformers.BatchEncoding.tokens",parameters:[{name:"batch_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.tokens.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L279",returnDescription:` <p>The list of tokens at that index.</p> `,returnType:` <p><code>List[str]</code></p> `}}),zo=new T({props:{name:"word_ids",anchor:"transformers.BatchEncoding.word_ids",parameters:[{name:"batch_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.word_ids.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L345",returnDescription:` <p>A list indicating the word corresponding to each token. 
Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),Eo=new T({props:{name:"word_to_chars",anchor:"transformers.BatchEncoding.word_to_chars",parameters:[{name:"batch_or_word_index",val:": int"},{name:"word_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.word_to_chars.batch_or_word_index",description:`<strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence`,name:"batch_or_word_index"},{anchor:"transformers.BatchEncoding.word_to_chars.word_index",description:`<strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"word_index"},{anchor:"transformers.BatchEncoding.word_to_chars.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.`,name:"sequence_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L572",returnDescription:` <p>Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with:</p> <ul> <li>start: index of the first character associated to the token in the original string</li> <li>end: index of the character following the last character associated to the token in the original string</li> </ul> `,returnType:` <p><code>CharSpan</code> or <code>List[CharSpan]</code></p> `}}),qo=new T({props:{name:"word_to_tokens",anchor:"transformers.BatchEncoding.word_to_tokens",parameters:[{name:"batch_or_word_index",val:": int"},{name:"word_index",val:": typing.Optional[int] = None"},{name:"sequence_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.word_to_tokens.batch_or_word_index",description:`<strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence.`,name:"batch_or_word_index"},{anchor:"transformers.BatchEncoding.word_to_tokens.word_index",description:`<strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.`,name:"word_index"},{anchor:"transformers.BatchEncoding.word_to_tokens.sequence_index",description:`<strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.`,name:"sequence_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L441",returnDescription:` <p>Optional <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.TokenSpan" >TokenSpan</a> Span of tokens in the encoded sequence. 
Returns <code>None</code> if no tokens correspond to the word.</p> `}}),Io=new T({props:{name:"words",anchor:"transformers.BatchEncoding.words",parameters:[{name:"batch_index",val:": int = 0"}],parametersDescription:[{anchor:"transformers.BatchEncoding.words.batch_index",description:"<strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.",name:"batch_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L321",returnDescription:` <p>A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> `,returnType:` <p><code>List[Optional[int]]</code></p> `}}),{c(){y=r("meta"),L=d(),q=r("h1"),E=r("a"),B=r("span"),h(v.$$.fragment),I=d(),Bn=r("span"),ji=o("Tokenizer"),Ua=d(),He=r("p"),Hi=o(`A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a \u201CFast\u201D implementation based on the Rust library `),Tt=r("a"),Mi=o("\u{1F917} Tokenizers"),Xi=o(". The \u201CFast\u201D implementations allows:"),Va=d(),Me=r("ol"),Wn=r("li"),Yi=o("a significant speed-up in particular when doing batched tokenization and"),Ji=d(),Rn=r("li"),Ki=o(`additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),Ga=d(),W=r("p"),Qi=o("The base classes "),Oo=r("a"),Zi=o("PreTrainedTokenizer"),ed=o(" and "),So=r("a"),td=o("PreTrainedTokenizerFast"),od=o(` implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and \u201CFast\u201D tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository). They both rely on `),Bo=r("a"),nd=o("PreTrainedTokenizerBase"),rd=o(` that contains the common methods, and `),Wo=r("a"),sd=o("SpecialTokensMixin"),ad=o("."),ja=d(),Ie=r("p"),Ro=r("a"),id=o("PreTrainedTokenizer"),dd=o(" and "),Uo=r("a"),cd=o("PreTrainedTokenizerFast"),ld=o(` thus implement the main methods for using all the tokenizers:`),Ha=d(),me=r("ul"),Un=r("li"),hd=o(`Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).`),pd=d(),Vn=r("li"),md=o("Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece\u2026)."),fd=d(),Gn=r("li"),ud=o(`Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.`),Ma=d(),D=r("p"),Vo=r("a"),_d=o("BatchEncoding"),gd=o(` holds the output of the `),Go=r("a"),kd=o("PreTrainedTokenizerBase"),bd=o("\u2019s encoding methods ("),jn=r("code"),vd=o("__call__"),Td=o(`, `),Hn=r("code"),yd=o("encode_plus"),wd=o(" and "),Mn=r("code"),xd=o("batch_encode_plus"),zd=o(`) and is derived from a Python dictionary. 
When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (`),Xn=r("code"),Ed=o("input_ids"),$d=o(", "),Yn=r("code"),Pd=o("attention_mask"),qd=o(`\u2026). When the tokenizer is a \u201CFast\u201D tokenizer (i.e., backed by HuggingFace `),yt=r("a"),Dd=o("tokenizers library"),Ld=o(`), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),Xa=d(),Ne=r("h2"),Xe=r("a"),Jn=r("span"),h(wt.$$.fragment),Fd=d(),Kn=r("span"),Id=o("PreTrainedTokenizer"),Ya=d(),g=r("div"),h(xt.$$.fragment),Nd=d(),Qn=r("p"),Ad=o("Base class for all slow tokenizers."),Cd=d(),zt=r("p"),Od=o("Inherits from "),jo=r("a"),Sd=o("PreTrainedTokenizerBase"),Bd=o("."),Wd=d(),Zn=r("p"),Rd=o(`Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.`),Ud=d(),er=r("p"),Vd=o(`This class also contain the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),Gd=d(),tr=r("p"),jd=o("Class attributes (overridden by derived classes)"),Hd=d(),N=r("ul"),fe=r("li"),or=r("strong"),Md=o("vocab_files_names"),Xd=o(" ("),nr=r("code"),Yd=o("Dict[str, str]"),Jd=o(") \u2014 A dictionary with, as keys, the "),rr=r("code"),Kd=o("__init__"),Qd=o(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Zd=d(),R=r("li"),sr=r("strong"),ec=o("pretrained_vocab_files_map"),tc=o(" ("),ar=r("code"),oc=o("Dict[str, Dict[str, str]]"),nc=o(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),ir=r("code"),rc=o("__init__"),sc=o(` keyword name of each vocabulary file required by the model, the low-level being the `),dr=r("code"),ac=o("short-cut-names"),ic=o(" of the pretrained models with, as associated values, the "),cr=r("code"),dc=o("url"),cc=o(` to the associated pretrained vocabulary file.`),lc=d(),Q=r("li"),lr=r("strong"),hc=o("max_model_input_sizes"),pc=o(" ("),hr=r("code"),mc=o("Dict[str, Optional[int]]"),fc=o(") \u2014 A dictionary with, as keys, the "),pr=r("code"),uc=o("short-cut-names"),_c=o(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),mr=r("code"),gc=o("None"),kc=o(" if the model has no maximum input size."),bc=d(),U=r("li"),fr=r("strong"),vc=o("pretrained_init_configuration"),Tc=o(" ("),ur=r("code"),yc=o("Dict[str, Dict[str, Any]]"),wc=o(`) \u2014 A dictionary with, as keys, the `),_r=r("code"),xc=o("short-cut-names"),zc=o(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),gr=r("code"),Ec=o("__init__"),$c=o(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Ho=r("a"),Pc=o("from_pretrained()"),qc=o(" method."),Dc=d(),Ye=r("li"),kr=r("strong"),Lc=o("model_input_names"),Fc=o(" ("),br=r("code"),Ic=o("List[str]"),Nc=o(") \u2014 A list of inputs expected in the forward pass of the model."),Ac=d(),Z=r("li"),vr=r("strong"),Cc=o("padding_side"),Oc=o(" 
("),Tr=r("code"),Sc=o("str"),Bc=o(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),yr=r("code"),Wc=o("'right'"),Rc=o(" or "),wr=r("code"),Uc=o("'left'"),Vc=o("."),Gc=d(),ee=r("li"),xr=r("strong"),jc=o("truncation_side"),Hc=o(" ("),zr=r("code"),Mc=o("str"),Xc=o(`) \u2014 The default value for the side on which the model should have truncation applied. Should be `),Er=r("code"),Yc=o("'right'"),Jc=o(" or "),$r=r("code"),Kc=o("'left'"),Qc=o("."),Zc=d(),Je=r("div"),h(Et.$$.fragment),el=d(),Pr=r("p"),tl=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),ol=d(),Ke=r("div"),h($t.$$.fragment),nl=d(),qr=r("p"),rl=o("Convert a list of lists of token ids into a list of strings by calling decode."),sl=d(),ue=r("div"),h(Pt.$$.fragment),al=d(),Dr=r("p"),il=o(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),dl=d(),qt=r("p"),cl=o("Similar to doing "),Lr=r("code"),ll=o("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),hl=o("."),pl=d(),_e=r("div"),h(Dt.$$.fragment),ml=d(),Fr=r("p"),fl=o("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),ul=d(),Lt=r("p"),_l=o("Same as doing "),Ir=r("code"),gl=o("self.convert_tokens_to_ids(self.tokenize(text))"),kl=o("."),bl=d(),ge=r("div"),h(Ft.$$.fragment),vl=d(),It=r("p"),Tl=o(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Nr=r("code"),yl=o("repo_path_or_name"),wl=o("."),xl=d(),h(Qe.$$.fragment),zl=d(),Ze=r("div"),h(Nt.$$.fragment),El=d(),Ar=r("p"),$l=o(`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),Pl=d(),et=r("div"),h(At.$$.fragment),ql=d(),Cr=r("p"),Dl=o(`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),Ll=d(),tt=r("div"),h(Ct.$$.fragment),Fl=d(),Or=r("p"),Il=o("Returns the added tokens in the vocabulary as a dictionary of token to index."),Nl=d(),ke=r("div"),h(Ot.$$.fragment),Al=d(),Sr=r("p"),Cl=o("Returns the number of added tokens when encoding a sequence with special tokens."),Ol=d(),h(ot.$$.fragment),Sl=d(),be=r("div"),h(St.$$.fragment),Bl=d(),Br=r("p"),Wl=o("Performs any necessary transformations before tokenization."),Rl=d(),Ae=r("p"),Ul=o("This method should pop the arguments from kwargs and return the remaining "),Wr=r("code"),Vl=o("kwargs"),Gl=o(` as well. We test the `),Rr=r("code"),jl=o("kwargs"),Hl=o(" at the end of the encoding process to be sure all the arguments have been used."),Ml=d(),ve=r("div"),h(Bt.$$.fragment),Xl=d(),Ur=r("p"),Yl=o("Converts a string in a sequence of tokens, using the tokenizer."),Jl=d(),Vr=r("p"),Kl=o(`Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens.`),Ja=d(),Ce=r("h2"),nt=r("a"),Gr=r("span"),h(Wt.$$.fragment),Ql=d(),jr=r("span"),Zl=o("PreTrainedTokenizerFast"),Ka=d(),te=r("p"),eh=o("The "),Mo=r("a"),th=o("PreTrainedTokenizerFast"),oh=o(" depend on the "),Rt=r("a"),nh=o("tokenizers"),rh=o(` library. The tokenizers obtained from the \u{1F917} tokenizers library can be loaded very simply into \u{1F917} transformers. 
Take a look at the `),Xo=r("a"),sh=o("Using tokenizers from \u{1F917} tokenizers"),ah=o(" page to understand how this is done."),Qa=d(),k=r("div"),h(Ut.$$.fragment),ih=d(),Hr=r("p"),dh=o("Base class for all fast tokenizers (wrapping HuggingFace tokenizers library)."),ch=d(),Vt=r("p"),lh=o("Inherits from "),Yo=r("a"),hh=o("PreTrainedTokenizerBase"),ph=o("."),mh=d(),Mr=r("p"),fh=o(`Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.`),uh=d(),Xr=r("p"),_h=o(`This class also contains the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),gh=d(),Yr=r("p"),kh=o("Class attributes (overridden by derived classes)"),bh=d(),A=r("ul"),Te=r("li"),Jr=r("strong"),vh=o("vocab_files_names"),Th=o(" ("),Kr=r("code"),yh=o("Dict[str, str]"),wh=o(") \u2014 A dictionary with, as keys, the "),Qr=r("code"),xh=o("__init__"),zh=o(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Eh=d(),V=r("li"),Zr=r("strong"),$h=o("pretrained_vocab_files_map"),Ph=o(" ("),es=r("code"),qh=o("Dict[str, Dict[str, str]]"),Dh=o(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),ts=r("code"),Lh=o("__init__"),Fh=o(` keyword name of each vocabulary file required by the model, the low-level being the `),os=r("code"),Ih=o("short-cut-names"),Nh=o(" of the pretrained models with, as associated values, the "),ns=r("code"),Ah=o("url"),Ch=o(` to the associated pretrained vocabulary file.`),Oh=d(),oe=r("li"),rs=r("strong"),Sh=o("max_model_input_sizes"),Bh=o(" ("),ss=r("code"),Wh=o("Dict[str, Optional[int]]"),Rh=o(") \u2014 A dictionary with, as keys, the "),as=r("code"),Uh=o("short-cut-names"),Vh=o(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),is=r("code"),Gh=o("None"),jh=o(" if the model has no maximum input size."),Hh=d(),G=r("li"),ds=r("strong"),Mh=o("pretrained_init_configuration"),Xh=o(" ("),cs=r("code"),Yh=o("Dict[str, Dict[str, Any]]"),Jh=o(`) \u2014 A dictionary with, as keys, the `),ls=r("code"),Kh=o("short-cut-names"),Qh=o(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),hs=r("code"),Zh=o("__init__"),ep=o(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Jo=r("a"),tp=o("from_pretrained()"),op=o(" method."),np=d(),rt=r("li"),ps=r("strong"),rp=o("model_input_names"),sp=o(" ("),ms=r("code"),ap=o("List[str]"),ip=o(") \u2014 A list of inputs expected in the forward pass of the model."),dp=d(),ne=r("li"),fs=r("strong"),cp=o("padding_side"),lp=o(" ("),us=r("code"),hp=o("str"),pp=o(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),_s=r("code"),mp=o("'right'"),fp=o(" or "),gs=r("code"),up=o("'left'"),_p=o("."),gp=d(),re=r("li"),ks=r("strong"),kp=o("truncation_side"),bp=o(" ("),bs=r("code"),vp=o("str"),Tp=o(`) \u2014 The default value for the side on which the model should have truncation applied. 
Should be `),vs=r("code"),yp=o("'right'"),wp=o(" or "),Ts=r("code"),xp=o("'left'"),zp=o("."),Ep=d(),st=r("div"),h(Gt.$$.fragment),$p=d(),ys=r("p"),Pp=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),qp=d(),at=r("div"),h(jt.$$.fragment),Dp=d(),ws=r("p"),Lp=o("Convert a list of lists of token ids into a list of strings by calling decode."),Fp=d(),ye=r("div"),h(Ht.$$.fragment),Ip=d(),xs=r("p"),Np=o(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Ap=d(),Mt=r("p"),Cp=o("Similar to doing "),zs=r("code"),Op=o("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Sp=o("."),Bp=d(),we=r("div"),h(Xt.$$.fragment),Wp=d(),Es=r("p"),Rp=o("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Up=d(),Yt=r("p"),Vp=o("Same as doing "),$s=r("code"),Gp=o("self.convert_tokens_to_ids(self.tokenize(text))"),jp=o("."),Hp=d(),xe=r("div"),h(Jt.$$.fragment),Mp=d(),Kt=r("p"),Xp=o(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Ps=r("code"),Yp=o("repo_path_or_name"),Jp=o("."),Kp=d(),h(it.$$.fragment),Qp=d(),dt=r("div"),h(Qt.$$.fragment),Zp=d(),qs=r("p"),em=o(`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),tm=d(),ct=r("div"),h(Zt.$$.fragment),om=d(),Ds=r("p"),nm=o(`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),rm=d(),lt=r("div"),h(eo.$$.fragment),sm=d(),Ls=r("p"),am=o("Returns the added tokens in the vocabulary as a dictionary of token to index."),im=d(),ze=r("div"),h(to.$$.fragment),dm=d(),Fs=r("p"),cm=o("Returns the number of added tokens when encoding a sequence with special tokens."),lm=d(),h(ht.$$.fragment),hm=d(),Ee=r("div"),h(oo.$$.fragment),pm=d(),Is=r("p"),mm=o(`Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.`),fm=d(),Ns=r("p"),um=o(`The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.`),_m=d(),pt=r("div"),h(no.$$.fragment),gm=d(),As=r("p"),km=o(`Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.`),Za=d(),Oe=r("h2"),mt=r("a"),Cs=r("span"),h(ro.$$.fragment),bm=d(),Os=r("span"),vm=o("BatchEncoding"),ei=d(),w=r("div"),h(so.$$.fragment),Tm=d(),ie=r("p"),ym=o("Holds the output of the "),ao=r("a"),Ss=r("strong"),wm=o("call"),xm=o("()"),zm=o(`, `),Ko=r("a"),Em=o("encode_plus()"),$m=o(` and `),Qo=r("a"),Pm=o("batch_encode_plus()"),qm=o(" methods (tokens, attention_masks, etc)."),Dm=d(),Bs=r("p"),Lm=o(`This class is derived from a python dictionary and can be used as a dictionary. 
In addition, this class exposes utility methods to map from word/character space to token space.`),Fm=d(),j=r("div"),h(io.$$.fragment),Im=d(),Ws=r("p"),Nm=o(`Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch.`),Am=d(),Rs=r("p"),Cm=o("Can be called as:"),Om=d(),co=r("ul"),Zo=r("li"),Us=r("code"),Sm=o("self.char_to_token(char_index)"),Bm=o(" if batch size is 1"),Wm=d(),en=r("li"),Vs=r("code"),Rm=o("self.char_to_token(batch_index, char_index)"),Um=o(" if batch size is greater or equal to 1"),Vm=d(),Gs=r("p"),Gm=o(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),jm=d(),H=r("div"),h(lo.$$.fragment),Hm=d(),js=r("p"),Mm=o(`Get the word in the original string corresponding to a character in the original string of a sequence of the batch.`),Xm=d(),Hs=r("p"),Ym=o("Can be called as:"),Jm=d(),ho=r("ul"),tn=r("li"),Ms=r("code"),Km=o("self.char_to_word(char_index)"),Qm=o(" if batch size is 1"),Zm=d(),on=r("li"),Xs=r("code"),ef=o("self.char_to_word(batch_index, char_index)"),tf=o(" if batch size is greater than 1"),of=d(),Ys=r("p"),nf=o(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),rf=d(),ft=r("div"),h(po.$$.fragment),sf=d(),Js=r("p"),af=o("Convert the inner content to tensors."),df=d(),$e=r("div"),h(mo.$$.fragment),cf=d(),Ks=r("p"),lf=o("Return a list mapping the tokens to the id of their original sentences:"),hf=d(),Se=r("ul"),nn=r("li"),Qs=r("code"),pf=o("None"),mf=o(" for special tokens added around or between sequences,"),ff=d(),rn=r("li"),Zs=r("code"),uf=o("0"),_f=o(" for tokens corresponding to words in the first sequence,"),gf=d(),sn=r("li"),ea=r("code"),kf=o("1"),bf=o(` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.`),vf=d(),ut=r("div"),h(fo.$$.fragment),Tf=d(),uo=r("p"),yf=o("Send all values to device by calling "),ta=r("code"),wf=o("v.to(device)"),xf=o(" (PyTorch only)."),zf=d(),O=r("div"),h(_o.$$.fragment),Ef=d(),oa=r("p"),$f=o("Get the character span corresponding to an encoded token in a sequence of the batch."),Pf=d(),go=r("p"),qf=o("Character spans are returned as a "),an=r("a"),Df=o("CharSpan"),Lf=o(" with:"),Ff=d(),ko=r("ul"),dn=r("li"),na=r("strong"),If=o("start"),Nf=o(" \u2014 Index of the first character in the original string associated to the token."),Af=d(),cn=r("li"),ra=r("strong"),Cf=o("end"),Of=o(` \u2014 Index of the character following the last character in the original string associated to the token.`),Sf=d(),sa=r("p"),Bf=o("Can be called as:"),Wf=d(),bo=r("ul"),ln=r("li"),aa=r("code"),Rf=o("self.token_to_chars(token_index)"),Uf=o(" if batch size is 1"),Vf=d(),hn=r("li"),ia=r("code"),Gf=o("self.token_to_chars(batch_index, token_index)"),jf=o(" if batch size is greater or equal to 1"),Hf=d(),M=r("div"),h(vo.$$.fragment),Mf=d(),Be=r("p"),Xf=o("Get the index of the sequence represented by the given token. 
In the general use case, this method returns "),da=r("code"),Yf=o("0"),Jf=o(` for a single sequence or the first sequence of a pair, and `),ca=r("code"),Kf=o("1"),Qf=o(" for the second sequence of a pair"),Zf=d(),la=r("p"),eu=o("Can be called as:"),tu=d(),To=r("ul"),pn=r("li"),ha=r("code"),ou=o("self.token_to_sequence(token_index)"),nu=o(" if batch size is 1"),ru=d(),mn=r("li"),pa=r("code"),su=o("self.token_to_sequence(batch_index, token_index)"),au=o(" if batch size is greater than 1"),iu=d(),ma=r("p"),du=o(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),cu=d(),X=r("div"),h(yo.$$.fragment),lu=d(),fa=r("p"),hu=o("Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch."),pu=d(),ua=r("p"),mu=o("Can be called as:"),fu=d(),wo=r("ul"),fn=r("li"),_a=r("code"),uu=o("self.token_to_word(token_index)"),_u=o(" if batch size is 1"),gu=d(),un=r("li"),ga=r("code"),ku=o("self.token_to_word(batch_index, token_index)"),bu=o(" if batch size is greater than 1"),vu=d(),ka=r("p"),Tu=o(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),yu=d(),_t=r("div"),h(xo.$$.fragment),wu=d(),ba=r("p"),xu=o(`Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).`),zu=d(),gt=r("div"),h(zo.$$.fragment),Eu=d(),va=r("p"),$u=o("Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),Pu=d(),S=r("div"),h(Eo.$$.fragment),qu=d(),Ta=r("p"),Du=o("Get the character span in the original string corresponding to given word in a sequence of the batch."),Lu=d(),ya=r("p"),Fu=o("Character spans are returned as a CharSpan NamedTuple with:"),Iu=d(),$o=r("ul"),wa=r("li"),Nu=o("start: index of the first character in the original string"),Au=d(),xa=r("li"),Cu=o("end: index of the character following the last character in the original string"),Ou=d(),za=r("p"),Su=o("Can be called as:"),Bu=d(),Po=r("ul"),_n=r("li"),Ea=r("code"),Wu=o("self.word_to_chars(word_index)"),Ru=o(" if batch size is 1"),Uu=d(),gn=r("li"),$a=r("code"),Vu=o("self.word_to_chars(batch_index, word_index)"),Gu=o(" if batch size is greater or equal to 1"),ju=d(),F=r("div"),h(qo.$$.fragment),Hu=d(),Pa=r("p"),Mu=o("Get the encoded token span corresponding to a word in a sequence of the batch."),Xu=d(),Do=r("p"),Yu=o("Token spans are returned as a "),kn=r("a"),Ju=o("TokenSpan"),Ku=o(" with:"),Qu=d(),Lo=r("ul"),bn=r("li"),qa=r("strong"),Zu=o("start"),e_=o(" \u2014 Index of the first token."),t_=d(),vn=r("li"),Da=r("strong"),o_=o("end"),n_=o(" \u2014 Index of the token following the last token."),r_=d(),La=r("p"),s_=o("Can be called as:"),a_=d(),Fo=r("ul"),Tn=r("li"),Fa=r("code"),i_=o("self.word_to_tokens(word_index, sequence_index: int = 0)"),d_=o(" if batch size is 1"),c_=d(),yn=r("li"),Ia=r("code"),l_=o("self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)"),h_=o(` if batch size is greater or equal to 1`),p_=d(),Na=r("p"),m_=o(`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),f_=d(),kt=r("div"),h(Io.$$.fragment),u_=d(),Aa=r("p"),__=o("Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),this.h()},l(i){const b=pv('[data-svelte="svelte-1phssyn"]',document.head);y=s(b,"META",{name:!0,content:!0}),b.forEach(t),L=c(i),q=s(i,"H1",{class:!0});var No=a(q);E=s(No,"A",{id:!0,class:!0,href:!0});var Ca=a(E);B=s(Ca,"SPAN",{});var Oa=a(B);p(v.$$.fragment,Oa),Oa.forEach(t),Ca.forEach(t),I=c(No),Bn=s(No,"SPAN",{});var Sa=a(Bn);ji=n(Sa,"Tokenizer"),Sa.forEach(t),No.forEach(t),Ua=c(i),He=s(i,"P",{});var oi=a(He);Hi=n(oi,`A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a \u201CFast\u201D implementation based on the Rust library `),Tt=s(oi,"A",{href:!0,rel:!0});var B_=a(Tt);Mi=n(B_,"\u{1F917} Tokenizers"),B_.forEach(t),Xi=n(oi,". The \u201CFast\u201D implementations allows:"),oi.forEach(t),Va=c(i),Me=s(i,"OL",{});var ni=a(Me);Wn=s(ni,"LI",{});var W_=a(Wn);Yi=n(W_,"a significant speed-up in particular when doing batched tokenization and"),W_.forEach(t),Ji=c(ni),Rn=s(ni,"LI",{});var R_=a(Rn);Ki=n(R_,`additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),R_.forEach(t),ni.forEach(t),Ga=c(i),W=s(i,"P",{});var Pe=a(W);Qi=n(Pe,"The base classes "),Oo=s(Pe,"A",{href:!0});var U_=a(Oo);Zi=n(U_,"PreTrainedTokenizer"),U_.forEach(t),ed=n(Pe," and "),So=s(Pe,"A",{href:!0});var V_=a(So);td=n(V_,"PreTrainedTokenizerFast"),V_.forEach(t),od=n(Pe,` implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and \u201CFast\u201D tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository). 
They both rely on `),Bo=s(Pe,"A",{href:!0});var G_=a(Bo);nd=n(G_,"PreTrainedTokenizerBase"),G_.forEach(t),rd=n(Pe,` that contains the common methods, and `),Wo=s(Pe,"A",{href:!0});var j_=a(Wo);sd=n(j_,"SpecialTokensMixin"),j_.forEach(t),ad=n(Pe,"."),Pe.forEach(t),ja=c(i),Ie=s(i,"P",{});var Ba=a(Ie);Ro=s(Ba,"A",{href:!0});var H_=a(Ro);id=n(H_,"PreTrainedTokenizer"),H_.forEach(t),dd=n(Ba," and "),Uo=s(Ba,"A",{href:!0});var M_=a(Uo);cd=n(M_,"PreTrainedTokenizerFast"),M_.forEach(t),ld=n(Ba,` thus implement the main methods for using all the tokenizers:`),Ba.forEach(t),Ha=c(i),me=s(i,"UL",{});var wn=a(me);Un=s(wn,"LI",{});var X_=a(Un);hd=n(X_,`Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).`),X_.forEach(t),pd=c(wn),Vn=s(wn,"LI",{});var Y_=a(Vn);md=n(Y_,"Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece\u2026)."),Y_.forEach(t),fd=c(wn),Gn=s(wn,"LI",{});var J_=a(Gn);ud=n(J_,`Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.`),J_.forEach(t),wn.forEach(t),Ma=c(i),D=s(i,"P",{});var C=a(D);Vo=s(C,"A",{href:!0});var K_=a(Vo);_d=n(K_,"BatchEncoding"),K_.forEach(t),gd=n(C,` holds the output of the `),Go=s(C,"A",{href:!0});var Q_=a(Go);kd=n(Q_,"PreTrainedTokenizerBase"),Q_.forEach(t),bd=n(C,"\u2019s encoding methods ("),jn=s(C,"CODE",{});var Z_=a(jn);vd=n(Z_,"__call__"),Z_.forEach(t),Td=n(C,`, `),Hn=s(C,"CODE",{});var eg=a(Hn);yd=n(eg,"encode_plus"),eg.forEach(t),wd=n(C," and "),Mn=s(C,"CODE",{});var tg=a(Mn);xd=n(tg,"batch_encode_plus"),tg.forEach(t),zd=n(C,`) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (`),Xn=s(C,"CODE",{});var og=a(Xn);Ed=n(og,"input_ids"),og.forEach(t),$d=n(C,", "),Yn=s(C,"CODE",{});var ng=a(Yn);Pd=n(ng,"attention_mask"),ng.forEach(t),qd=n(C,`\u2026). 
When the tokenizer is a \u201CFast\u201D tokenizer (i.e., backed by HuggingFace `),yt=s(C,"A",{href:!0,rel:!0});var rg=a(yt);Dd=n(rg,"tokenizers library"),rg.forEach(t),Ld=n(C,`), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).`),C.forEach(t),Xa=c(i),Ne=s(i,"H2",{class:!0});var ri=a(Ne);Xe=s(ri,"A",{id:!0,class:!0,href:!0});var sg=a(Xe);Jn=s(sg,"SPAN",{});var ag=a(Jn);p(wt.$$.fragment,ag),ag.forEach(t),sg.forEach(t),Fd=c(ri),Kn=s(ri,"SPAN",{});var ig=a(Kn);Id=n(ig,"PreTrainedTokenizer"),ig.forEach(t),ri.forEach(t),Ya=c(i),g=s(i,"DIV",{class:!0});var x=a(g);p(xt.$$.fragment,x),Nd=c(x),Qn=s(x,"P",{});var dg=a(Qn);Ad=n(dg,"Base class for all slow tokenizers."),dg.forEach(t),Cd=c(x),zt=s(x,"P",{});var si=a(zt);Od=n(si,"Inherits from "),jo=s(si,"A",{href:!0});var cg=a(jo);Sd=n(cg,"PreTrainedTokenizerBase"),cg.forEach(t),Bd=n(si,"."),si.forEach(t),Wd=c(x),Zn=s(x,"P",{});var lg=a(Zn);Rd=n(lg,`Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.`),lg.forEach(t),Ud=c(x),er=s(x,"P",{});var hg=a(er);Vd=n(hg,`This class also contain the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),hg.forEach(t),Gd=c(x),tr=s(x,"P",{});var pg=a(tr);jd=n(pg,"Class attributes (overridden by derived classes)"),pg.forEach(t),Hd=c(x),N=s(x,"UL",{});var Y=a(N);fe=s(Y,"LI",{});var Ao=a(fe);or=s(Ao,"STRONG",{});var mg=a(or);Md=n(mg,"vocab_files_names"),mg.forEach(t),Xd=n(Ao," ("),nr=s(Ao,"CODE",{});var fg=a(nr);Yd=n(fg,"Dict[str, str]"),fg.forEach(t),Jd=n(Ao,") \u2014 A dictionary with, as keys, the "),rr=s(Ao,"CODE",{});var ug=a(rr);Kd=n(ug,"__init__"),ug.forEach(t),Qd=n(Ao,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Ao.forEach(t),Zd=c(Y),R=s(Y,"LI",{});var de=a(R);sr=s(de,"STRONG",{});var _g=a(sr);ec=n(_g,"pretrained_vocab_files_map"),_g.forEach(t),tc=n(de," ("),ar=s(de,"CODE",{});var gg=a(ar);oc=n(gg,"Dict[str, Dict[str, str]]"),gg.forEach(t),nc=n(de,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),ir=s(de,"CODE",{});var kg=a(ir);rc=n(kg,"__init__"),kg.forEach(t),sc=n(de,` keyword name of each vocabulary file required by the model, the low-level being the `),dr=s(de,"CODE",{});var bg=a(dr);ac=n(bg,"short-cut-names"),bg.forEach(t),ic=n(de," of the pretrained models with, as associated values, the "),cr=s(de,"CODE",{});var vg=a(cr);dc=n(vg,"url"),vg.forEach(t),cc=n(de,` to the associated pretrained vocabulary file.`),de.forEach(t),lc=c(Y),Q=s(Y,"LI",{});var We=a(Q);lr=s(We,"STRONG",{});var Tg=a(lr);hc=n(Tg,"max_model_input_sizes"),Tg.forEach(t),pc=n(We," ("),hr=s(We,"CODE",{});var yg=a(hr);mc=n(yg,"Dict[str, Optional[int]]"),yg.forEach(t),fc=n(We,") \u2014 A dictionary with, as keys, the "),pr=s(We,"CODE",{});var wg=a(pr);uc=n(wg,"short-cut-names"),wg.forEach(t),_c=n(We,` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),mr=s(We,"CODE",{});var xg=a(mr);gc=n(xg,"None"),xg.forEach(t),kc=n(We," if the model has no 
maximum input size."),We.forEach(t),bc=c(Y),U=s(Y,"LI",{});var ce=a(U);fr=s(ce,"STRONG",{});var zg=a(fr);vc=n(zg,"pretrained_init_configuration"),zg.forEach(t),Tc=n(ce," ("),ur=s(ce,"CODE",{});var Eg=a(ur);yc=n(Eg,"Dict[str, Dict[str, Any]]"),Eg.forEach(t),wc=n(ce,`) \u2014 A dictionary with, as keys, the `),_r=s(ce,"CODE",{});var $g=a(_r);xc=n($g,"short-cut-names"),$g.forEach(t),zc=n(ce,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),gr=s(ce,"CODE",{});var Pg=a(gr);Ec=n(Pg,"__init__"),Pg.forEach(t),$c=n(ce,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Ho=s(ce,"A",{href:!0});var qg=a(Ho);Pc=n(qg,"from_pretrained()"),qg.forEach(t),qc=n(ce," method."),ce.forEach(t),Dc=c(Y),Ye=s(Y,"LI",{});var Wa=a(Ye);kr=s(Wa,"STRONG",{});var Dg=a(kr);Lc=n(Dg,"model_input_names"),Dg.forEach(t),Fc=n(Wa," ("),br=s(Wa,"CODE",{});var Lg=a(br);Ic=n(Lg,"List[str]"),Lg.forEach(t),Nc=n(Wa,") \u2014 A list of inputs expected in the forward pass of the model."),Wa.forEach(t),Ac=c(Y),Z=s(Y,"LI",{});var Re=a(Z);vr=s(Re,"STRONG",{});var Fg=a(vr);Cc=n(Fg,"padding_side"),Fg.forEach(t),Oc=n(Re," ("),Tr=s(Re,"CODE",{});var Ig=a(Tr);Sc=n(Ig,"str"),Ig.forEach(t),Bc=n(Re,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),yr=s(Re,"CODE",{});var Ng=a(yr);Wc=n(Ng,"'right'"),Ng.forEach(t),Rc=n(Re," or "),wr=s(Re,"CODE",{});var Ag=a(wr);Uc=n(Ag,"'left'"),Ag.forEach(t),Vc=n(Re,"."),Re.forEach(t),Gc=c(Y),ee=s(Y,"LI",{});var Ue=a(ee);xr=s(Ue,"STRONG",{});var Cg=a(xr);jc=n(Cg,"truncation_side"),Cg.forEach(t),Hc=n(Ue," ("),zr=s(Ue,"CODE",{});var Og=a(zr);Mc=n(Og,"str"),Og.forEach(t),Xc=n(Ue,`) \u2014 The default value for the side on which the model should have truncation applied. 
Should be `),Er=s(Ue,"CODE",{});var Sg=a(Er);Yc=n(Sg,"'right'"),Sg.forEach(t),Jc=n(Ue," or "),$r=s(Ue,"CODE",{});var Bg=a($r);Kc=n(Bg,"'left'"),Bg.forEach(t),Qc=n(Ue,"."),Ue.forEach(t),Y.forEach(t),Zc=c(x),Je=s(x,"DIV",{class:!0});var ai=a(Je);p(Et.$$.fragment,ai),el=c(ai),Pr=s(ai,"P",{});var Wg=a(Pr);tl=n(Wg,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Wg.forEach(t),ai.forEach(t),ol=c(x),Ke=s(x,"DIV",{class:!0});var ii=a(Ke);p($t.$$.fragment,ii),nl=c(ii),qr=s(ii,"P",{});var Rg=a(qr);rl=n(Rg,"Convert a list of lists of token ids into a list of strings by calling decode."),Rg.forEach(t),ii.forEach(t),sl=c(x),ue=s(x,"DIV",{class:!0});var xn=a(ue);p(Pt.$$.fragment,xn),al=c(xn),Dr=s(xn,"P",{});var Ug=a(Dr);il=n(Ug,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Ug.forEach(t),dl=c(xn),qt=s(xn,"P",{});var di=a(qt);cl=n(di,"Similar to doing "),Lr=s(di,"CODE",{});var Vg=a(Lr);ll=n(Vg,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Vg.forEach(t),hl=n(di,"."),di.forEach(t),xn.forEach(t),pl=c(x),_e=s(x,"DIV",{class:!0});var zn=a(_e);p(Dt.$$.fragment,zn),ml=c(zn),Fr=s(zn,"P",{});var Gg=a(Fr);fl=n(Gg,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Gg.forEach(t),ul=c(zn),Lt=s(zn,"P",{});var ci=a(Lt);_l=n(ci,"Same as doing "),Ir=s(ci,"CODE",{});var jg=a(Ir);gl=n(jg,"self.convert_tokens_to_ids(self.tokenize(text))"),jg.forEach(t),kl=n(ci,"."),ci.forEach(t),zn.forEach(t),bl=c(x),ge=s(x,"DIV",{class:!0});var En=a(ge);p(Ft.$$.fragment,En),vl=c(En),It=s(En,"P",{});var li=a(It);Tl=n(li,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Nr=s(li,"CODE",{});var Hg=a(Nr);yl=n(Hg,"repo_path_or_name"),Hg.forEach(t),wl=n(li,"."),li.forEach(t),xl=c(En),p(Qe.$$.fragment,En),En.forEach(t),zl=c(x),Ze=s(x,"DIV",{class:!0});var hi=a(Ze);p(Nt.$$.fragment,hi),El=c(hi),Ar=s(hi,"P",{});var Mg=a(Ar);$l=n(Mg,`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),Mg.forEach(t),hi.forEach(t),Pl=c(x),et=s(x,"DIV",{class:!0});var pi=a(et);p(At.$$.fragment,pi),ql=c(pi),Cr=s(pi,"P",{});var Xg=a(Cr);Dl=n(Xg,`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),Xg.forEach(t),pi.forEach(t),Ll=c(x),tt=s(x,"DIV",{class:!0});var mi=a(tt);p(Ct.$$.fragment,mi),Fl=c(mi),Or=s(mi,"P",{});var Yg=a(Or);Il=n(Yg,"Returns the added tokens in the vocabulary as a dictionary of token to index."),Yg.forEach(t),mi.forEach(t),Nl=c(x),ke=s(x,"DIV",{class:!0});var $n=a(ke);p(Ot.$$.fragment,$n),Al=c($n),Sr=s($n,"P",{});var Jg=a(Sr);Cl=n(Jg,"Returns the number of added tokens when encoding a sequence with special tokens."),Jg.forEach(t),Ol=c($n),p(ot.$$.fragment,$n),$n.forEach(t),Sl=c(x),be=s(x,"DIV",{class:!0});var Pn=a(be);p(St.$$.fragment,Pn),Bl=c(Pn),Br=s(Pn,"P",{});var Kg=a(Br);Wl=n(Kg,"Performs any necessary transformations before tokenization."),Kg.forEach(t),Rl=c(Pn),Ae=s(Pn,"P",{});var qn=a(Ae);Ul=n(qn,"This method should pop the arguments from kwargs and return the remaining "),Wr=s(qn,"CODE",{});var Qg=a(Wr);Vl=n(Qg,"kwargs"),Qg.forEach(t),Gl=n(qn,` as well. 
We test the `),Rr=s(qn,"CODE",{});var Zg=a(Rr);jl=n(Zg,"kwargs"),Zg.forEach(t),Hl=n(qn," at the end of the encoding process to be sure all the arguments have been used."),qn.forEach(t),Pn.forEach(t),Ml=c(x),ve=s(x,"DIV",{class:!0});var Dn=a(ve);p(Bt.$$.fragment,Dn),Xl=c(Dn),Ur=s(Dn,"P",{});var ek=a(Ur);Yl=n(ek,"Converts a string in a sequence of tokens, using the tokenizer."),ek.forEach(t),Jl=c(Dn),Vr=s(Dn,"P",{});var tk=a(Vr);Kl=n(tk,`Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens.`),tk.forEach(t),Dn.forEach(t),x.forEach(t),Ja=c(i),Ce=s(i,"H2",{class:!0});var fi=a(Ce);nt=s(fi,"A",{id:!0,class:!0,href:!0});var ok=a(nt);Gr=s(ok,"SPAN",{});var nk=a(Gr);p(Wt.$$.fragment,nk),nk.forEach(t),ok.forEach(t),Ql=c(fi),jr=s(fi,"SPAN",{});var rk=a(jr);Zl=n(rk,"PreTrainedTokenizerFast"),rk.forEach(t),fi.forEach(t),Ka=c(i),te=s(i,"P",{});var bt=a(te);eh=n(bt,"The "),Mo=s(bt,"A",{href:!0});var sk=a(Mo);th=n(sk,"PreTrainedTokenizerFast"),sk.forEach(t),oh=n(bt," depend on the "),Rt=s(bt,"A",{href:!0,rel:!0});var ak=a(Rt);nh=n(ak,"tokenizers"),ak.forEach(t),rh=n(bt,` library. The tokenizers obtained from the \u{1F917} tokenizers library can be loaded very simply into \u{1F917} transformers. Take a look at the `),Xo=s(bt,"A",{href:!0});var ik=a(Xo);sh=n(ik,"Using tokenizers from \u{1F917} tokenizers"),ik.forEach(t),ah=n(bt," page to understand how this is done."),bt.forEach(t),Qa=c(i),k=s(i,"DIV",{class:!0});var z=a(k);p(Ut.$$.fragment,z),ih=c(z),Hr=s(z,"P",{});var dk=a(Hr);dh=n(dk,"Base class for all fast tokenizers (wrapping HuggingFace tokenizers library)."),dk.forEach(t),ch=c(z),Vt=s(z,"P",{});var ui=a(Vt);lh=n(ui,"Inherits from "),Yo=s(ui,"A",{href:!0});var ck=a(Yo);hh=n(ck,"PreTrainedTokenizerBase"),ck.forEach(t),ph=n(ui,"."),ui.forEach(t),mh=c(z),Mr=s(z,"P",{});var lk=a(Mr);fh=n(lk,`Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.`),lk.forEach(t),uh=c(z),Xr=s(z,"P",{});var hk=a(Xr);_h=n(hk,`This class also contains the added tokens in a unified way on top of all tokenizers so we don\u2019t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece\u2026).`),hk.forEach(t),gh=c(z),Yr=s(z,"P",{});var pk=a(Yr);kh=n(pk,"Class attributes (overridden by derived classes)"),pk.forEach(t),bh=c(z),A=s(z,"UL",{});var J=a(A);Te=s(J,"LI",{});var Co=a(Te);Jr=s(Co,"STRONG",{});var mk=a(Jr);vh=n(mk,"vocab_files_names"),mk.forEach(t),Th=n(Co," ("),Kr=s(Co,"CODE",{});var fk=a(Kr);yh=n(fk,"Dict[str, str]"),fk.forEach(t),wh=n(Co,") \u2014 A dictionary with, as keys, the "),Qr=s(Co,"CODE",{});var uk=a(Qr);xh=n(uk,"__init__"),uk.forEach(t),zh=n(Co,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Co.forEach(t),Eh=c(J),V=s(J,"LI",{});var le=a(V);Zr=s(le,"STRONG",{});var _k=a(Zr);$h=n(_k,"pretrained_vocab_files_map"),_k.forEach(t),Ph=n(le," ("),es=s(le,"CODE",{});var gk=a(es);qh=n(gk,"Dict[str, Dict[str, str]]"),gk.forEach(t),Dh=n(le,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),ts=s(le,"CODE",{});var kk=a(ts);Lh=n(kk,"__init__"),kk.forEach(t),Fh=n(le,` keyword name of each vocabulary file required by the model, the low-level being the `),os=s(le,"CODE",{});var 
bk=a(os);Ih=n(bk,"short-cut-names"),bk.forEach(t),Nh=n(le," of the pretrained models with, as associated values, the "),ns=s(le,"CODE",{});var vk=a(ns);Ah=n(vk,"url"),vk.forEach(t),Ch=n(le,` to the associated pretrained vocabulary file.`),le.forEach(t),Oh=c(J),oe=s(J,"LI",{});var Ve=a(oe);rs=s(Ve,"STRONG",{});var Tk=a(rs);Sh=n(Tk,"max_model_input_sizes"),Tk.forEach(t),Bh=n(Ve," ("),ss=s(Ve,"CODE",{});var yk=a(ss);Wh=n(yk,"Dict[str, Optional[int]]"),yk.forEach(t),Rh=n(Ve,") \u2014 A dictionary with, as keys, the "),as=s(Ve,"CODE",{});var wk=a(as);Uh=n(wk,"short-cut-names"),wk.forEach(t),Vh=n(Ve,` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),is=s(Ve,"CODE",{});var xk=a(is);Gh=n(xk,"None"),xk.forEach(t),jh=n(Ve," if the model has no maximum input size."),Ve.forEach(t),Hh=c(J),G=s(J,"LI",{});var he=a(G);ds=s(he,"STRONG",{});var zk=a(ds);Mh=n(zk,"pretrained_init_configuration"),zk.forEach(t),Xh=n(he," ("),cs=s(he,"CODE",{});var Ek=a(cs);Yh=n(Ek,"Dict[str, Dict[str, Any]]"),Ek.forEach(t),Jh=n(he,`) \u2014 A dictionary with, as keys, the `),ls=s(he,"CODE",{});var $k=a(ls);Kh=n($k,"short-cut-names"),$k.forEach(t),Qh=n(he,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),hs=s(he,"CODE",{});var Pk=a(hs);Zh=n(Pk,"__init__"),Pk.forEach(t),ep=n(he,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),Jo=s(he,"A",{href:!0});var qk=a(Jo);tp=n(qk,"from_pretrained()"),qk.forEach(t),op=n(he," method."),he.forEach(t),np=c(J),rt=s(J,"LI",{});var Ra=a(rt);ps=s(Ra,"STRONG",{});var Dk=a(ps);rp=n(Dk,"model_input_names"),Dk.forEach(t),sp=n(Ra," ("),ms=s(Ra,"CODE",{});var Lk=a(ms);ap=n(Lk,"List[str]"),Lk.forEach(t),ip=n(Ra,") \u2014 A list of inputs expected in the forward pass of the model."),Ra.forEach(t),dp=c(J),ne=s(J,"LI",{});var Ge=a(ne);fs=s(Ge,"STRONG",{});var Fk=a(fs);cp=n(Fk,"padding_side"),Fk.forEach(t),lp=n(Ge," ("),us=s(Ge,"CODE",{});var Ik=a(us);hp=n(Ik,"str"),Ik.forEach(t),pp=n(Ge,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),_s=s(Ge,"CODE",{});var Nk=a(_s);mp=n(Nk,"'right'"),Nk.forEach(t),fp=n(Ge," or "),gs=s(Ge,"CODE",{});var Ak=a(gs);up=n(Ak,"'left'"),Ak.forEach(t),_p=n(Ge,"."),Ge.forEach(t),gp=c(J),re=s(J,"LI",{});var je=a(re);ks=s(je,"STRONG",{});var Ck=a(ks);kp=n(Ck,"truncation_side"),Ck.forEach(t),bp=n(je," ("),bs=s(je,"CODE",{});var Ok=a(bs);vp=n(Ok,"str"),Ok.forEach(t),Tp=n(je,`) \u2014 The default value for the side on which the model should have truncation applied. 
Should be `),vs=s(je,"CODE",{});var Sk=a(vs);yp=n(Sk,"'right'"),Sk.forEach(t),wp=n(je," or "),Ts=s(je,"CODE",{});var Bk=a(Ts);xp=n(Bk,"'left'"),Bk.forEach(t),zp=n(je,"."),je.forEach(t),J.forEach(t),Ep=c(z),st=s(z,"DIV",{class:!0});var _i=a(st);p(Gt.$$.fragment,_i),$p=c(_i),ys=s(_i,"P",{});var Wk=a(ys);Pp=n(Wk,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Wk.forEach(t),_i.forEach(t),qp=c(z),at=s(z,"DIV",{class:!0});var gi=a(at);p(jt.$$.fragment,gi),Dp=c(gi),ws=s(gi,"P",{});var Rk=a(ws);Lp=n(Rk,"Convert a list of lists of token ids into a list of strings by calling decode."),Rk.forEach(t),gi.forEach(t),Fp=c(z),ye=s(z,"DIV",{class:!0});var Ln=a(ye);p(Ht.$$.fragment,Ln),Ip=c(Ln),xs=s(Ln,"P",{});var Uk=a(xs);Np=n(Uk,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Uk.forEach(t),Ap=c(Ln),Mt=s(Ln,"P",{});var ki=a(Mt);Cp=n(ki,"Similar to doing "),zs=s(ki,"CODE",{});var Vk=a(zs);Op=n(Vk,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Vk.forEach(t),Sp=n(ki,"."),ki.forEach(t),Ln.forEach(t),Bp=c(z),we=s(z,"DIV",{class:!0});var Fn=a(we);p(Xt.$$.fragment,Fn),Wp=c(Fn),Es=s(Fn,"P",{});var Gk=a(Es);Rp=n(Gk,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Gk.forEach(t),Up=c(Fn),Yt=s(Fn,"P",{});var bi=a(Yt);Vp=n(bi,"Same as doing "),$s=s(bi,"CODE",{});var jk=a($s);Gp=n(jk,"self.convert_tokens_to_ids(self.tokenize(text))"),jk.forEach(t),jp=n(bi,"."),bi.forEach(t),Fn.forEach(t),Hp=c(z),xe=s(z,"DIV",{class:!0});var In=a(xe);p(Jt.$$.fragment,In),Mp=c(In),Kt=s(In,"P",{});var vi=a(Kt);Xp=n(vi,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Ps=s(vi,"CODE",{});var Hk=a(Ps);Yp=n(Hk,"repo_path_or_name"),Hk.forEach(t),Jp=n(vi,"."),vi.forEach(t),Kp=c(In),p(it.$$.fragment,In),In.forEach(t),Qp=c(z),dt=s(z,"DIV",{class:!0});var Ti=a(dt);p(Qt.$$.fragment,Ti),Zp=c(Ti),qs=s(Ti,"P",{});var Mk=a(qs);em=n(Mk,`Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.`),Mk.forEach(t),Ti.forEach(t),tm=c(z),ct=s(z,"DIV",{class:!0});var yi=a(ct);p(Zt.$$.fragment,yi),om=c(yi),Ds=s(yi,"P",{});var Xk=a(Ds);nm=n(Xk,`Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.`),Xk.forEach(t),yi.forEach(t),rm=c(z),lt=s(z,"DIV",{class:!0});var wi=a(lt);p(eo.$$.fragment,wi),sm=c(wi),Ls=s(wi,"P",{});var Yk=a(Ls);am=n(Yk,"Returns the added tokens in the vocabulary as a dictionary of token to index."),Yk.forEach(t),wi.forEach(t),im=c(z),ze=s(z,"DIV",{class:!0});var Nn=a(ze);p(to.$$.fragment,Nn),dm=c(Nn),Fs=s(Nn,"P",{});var Jk=a(Fs);cm=n(Jk,"Returns the number of added tokens when encoding a sequence with special tokens."),Jk.forEach(t),lm=c(Nn),p(ht.$$.fragment,Nn),Nn.forEach(t),hm=c(z),Ee=s(z,"DIV",{class:!0});var An=a(Ee);p(oo.$$.fragment,An),pm=c(An),Is=s(An,"P",{});var Kk=a(Is);mm=n(Kk,`Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.`),Kk.forEach(t),fm=c(An),Ns=s(An,"P",{});var Qk=a(Ns);um=n(Qk,`The provided tokenizer has no padding / truncation strategy before the managed section. 
If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.`),Qk.forEach(t),An.forEach(t),_m=c(z),pt=s(z,"DIV",{class:!0});var xi=a(pt);p(no.$$.fragment,xi),gm=c(xi),As=s(xi,"P",{});var Zk=a(As);km=n(Zk,`Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.`),Zk.forEach(t),xi.forEach(t),z.forEach(t),Za=c(i),Oe=s(i,"H2",{class:!0});var zi=a(Oe);mt=s(zi,"A",{id:!0,class:!0,href:!0});var eb=a(mt);Cs=s(eb,"SPAN",{});var tb=a(Cs);p(ro.$$.fragment,tb),tb.forEach(t),eb.forEach(t),bm=c(zi),Os=s(zi,"SPAN",{});var ob=a(Os);vm=n(ob,"BatchEncoding"),ob.forEach(t),zi.forEach(t),ei=c(i),w=s(i,"DIV",{class:!0});var P=a(w);p(so.$$.fragment,P),Tm=c(P),ie=s(P,"P",{});var vt=a(ie);ym=n(vt,"Holds the output of the "),ao=s(vt,"A",{href:!0});var g_=a(ao);Ss=s(g_,"STRONG",{});var nb=a(Ss);wm=n(nb,"call"),nb.forEach(t),xm=n(g_,"()"),g_.forEach(t),zm=n(vt,`, `),Ko=s(vt,"A",{href:!0});var rb=a(Ko);Em=n(rb,"encode_plus()"),rb.forEach(t),$m=n(vt,` and `),Qo=s(vt,"A",{href:!0});var sb=a(Qo);Pm=n(sb,"batch_encode_plus()"),sb.forEach(t),qm=n(vt," methods (tokens, attention_masks, etc)."),vt.forEach(t),Dm=c(P),Bs=s(P,"P",{});var ab=a(Bs);Lm=n(ab,`This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes utility methods to map from word/character space to token space.`),ab.forEach(t),Fm=c(P),j=s(P,"DIV",{class:!0});var qe=a(j);p(io.$$.fragment,qe),Im=c(qe),Ws=s(qe,"P",{});var ib=a(Ws);Nm=n(ib,`Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch.`),ib.forEach(t),Am=c(qe),Rs=s(qe,"P",{});var db=a(Rs);Cm=n(db,"Can be called as:"),db.forEach(t),Om=c(qe),co=s(qe,"UL",{});var Ei=a(co);Zo=s(Ei,"LI",{});var k_=a(Zo);Us=s(k_,"CODE",{});var cb=a(Us);Sm=n(cb,"self.char_to_token(char_index)"),cb.forEach(t),Bm=n(k_," if batch size is 1"),k_.forEach(t),Wm=c(Ei),en=s(Ei,"LI",{});var b_=a(en);Vs=s(b_,"CODE",{});var lb=a(Vs);Rm=n(lb,"self.char_to_token(batch_index, char_index)"),lb.forEach(t),Um=n(b_," if batch size is greater or equal to 1"),b_.forEach(t),Ei.forEach(t),Vm=c(qe),Gs=s(qe,"P",{});var hb=a(Gs);Gm=n(hb,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),hb.forEach(t),qe.forEach(t),jm=c(P),H=s(P,"DIV",{class:!0});var De=a(H);p(lo.$$.fragment,De),Hm=c(De),js=s(De,"P",{});var pb=a(js);Mm=n(pb,`Get the word in the original string corresponding to a character in the original string of a sequence of the batch.`),pb.forEach(t),Xm=c(De),Hs=s(De,"P",{});var mb=a(Hs);Ym=n(mb,"Can be called as:"),mb.forEach(t),Jm=c(De),ho=s(De,"UL",{});var $i=a(ho);tn=s($i,"LI",{});var v_=a(tn);Ms=s(v_,"CODE",{});var fb=a(Ms);Km=n(fb,"self.char_to_word(char_index)"),fb.forEach(t),Qm=n(v_," if batch size is 1"),v_.forEach(t),Zm=c($i),on=s($i,"LI",{});var T_=a(on);Xs=s(T_,"CODE",{});var ub=a(Xs);ef=n(ub,"self.char_to_word(batch_index, char_index)"),ub.forEach(t),tf=n(T_," if batch size is greater than 1"),T_.forEach(t),$i.forEach(t),of=c(De),Ys=s(De,"P",{});var _b=a(Ys);nf=n(_b,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),_b.forEach(t),De.forEach(t),rf=c(P),ft=s(P,"DIV",{class:!0});var Pi=a(ft);p(po.$$.fragment,Pi),sf=c(Pi),Js=s(Pi,"P",{});var gb=a(Js);af=n(gb,"Convert the inner content to tensors."),gb.forEach(t),Pi.forEach(t),df=c(P),$e=s(P,"DIV",{class:!0});var Cn=a($e);p(mo.$$.fragment,Cn),cf=c(Cn),Ks=s(Cn,"P",{});var kb=a(Ks);lf=n(kb,"Return a list mapping the tokens to the id of their original sentences:"),kb.forEach(t),hf=c(Cn),Se=s(Cn,"UL",{});var On=a(Se);nn=s(On,"LI",{});var y_=a(nn);Qs=s(y_,"CODE",{});var bb=a(Qs);pf=n(bb,"None"),bb.forEach(t),mf=n(y_," for special tokens added around or between sequences,"),y_.forEach(t),ff=c(On),rn=s(On,"LI",{});var w_=a(rn);Zs=s(w_,"CODE",{});var vb=a(Zs);uf=n(vb,"0"),vb.forEach(t),_f=n(w_," for tokens corresponding to words in the first sequence,"),w_.forEach(t),gf=c(On),sn=s(On,"LI",{});var x_=a(sn);ea=s(x_,"CODE",{});var Tb=a(ea);kf=n(Tb,"1"),Tb.forEach(t),bf=n(x_,` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.`),x_.forEach(t),On.forEach(t),Cn.forEach(t),vf=c(P),ut=s(P,"DIV",{class:!0});var qi=a(ut);p(fo.$$.fragment,qi),Tf=c(qi),uo=s(qi,"P",{});var Di=a(uo);yf=n(Di,"Send all values to device by calling "),ta=s(Di,"CODE",{});var yb=a(ta);wf=n(yb,"v.to(device)"),yb.forEach(t),xf=n(Di," (PyTorch only)."),Di.forEach(t),qi.forEach(t),zf=c(P),O=s(P,"DIV",{class:!0});var se=a(O);p(_o.$$.fragment,se),Ef=c(se),oa=s(se,"P",{});var wb=a(oa);$f=n(wb,"Get the character span corresponding to an encoded token in a sequence of the batch."),wb.forEach(t),Pf=c(se),go=s(se,"P",{});var Li=a(go);qf=n(Li,"Character spans are returned as a "),an=s(Li,"A",{href:!0});var xb=a(an);Df=n(xb,"CharSpan"),xb.forEach(t),Lf=n(Li," with:"),Li.forEach(t),Ff=c(se),ko=s(se,"UL",{});var Fi=a(ko);dn=s(Fi,"LI",{});var z_=a(dn);na=s(z_,"STRONG",{});var zb=a(na);If=n(zb,"start"),zb.forEach(t),Nf=n(z_," \u2014 Index of the first character in the original string associated to the token."),z_.forEach(t),Af=c(Fi),cn=s(Fi,"LI",{});var E_=a(cn);ra=s(E_,"STRONG",{});var Eb=a(ra);Cf=n(Eb,"end"),Eb.forEach(t),Of=n(E_,` \u2014 Index of the character following the last character in the original string associated to the token.`),E_.forEach(t),Fi.forEach(t),Sf=c(se),sa=s(se,"P",{});var $b=a(sa);Bf=n($b,"Can be called as:"),$b.forEach(t),Wf=c(se),bo=s(se,"UL",{});var Ii=a(bo);ln=s(Ii,"LI",{});var $_=a(ln);aa=s($_,"CODE",{});var Pb=a(aa);Rf=n(Pb,"self.token_to_chars(token_index)"),Pb.forEach(t),Uf=n($_," if batch size is 1"),$_.forEach(t),Vf=c(Ii),hn=s(Ii,"LI",{});var P_=a(hn);ia=s(P_,"CODE",{});var qb=a(ia);Gf=n(qb,"self.token_to_chars(batch_index, token_index)"),qb.forEach(t),jf=n(P_," if batch size is greater or equal to 1"),P_.forEach(t),Ii.forEach(t),se.forEach(t),Hf=c(P),M=s(P,"DIV",{class:!0});var Le=a(M);p(vo.$$.fragment,Le),Mf=c(Le),Be=s(Le,"P",{});var Sn=a(Be);Xf=n(Sn,"Get the index of the sequence represented by the given token. 
In the general use case, this method returns "),da=s(Sn,"CODE",{});var Db=a(da);Yf=n(Db,"0"),Db.forEach(t),Jf=n(Sn,` for a single sequence or the first sequence of a pair, and `),ca=s(Sn,"CODE",{});var Lb=a(ca);Kf=n(Lb,"1"),Lb.forEach(t),Qf=n(Sn," for the second sequence of a pair"),Sn.forEach(t),Zf=c(Le),la=s(Le,"P",{});var Fb=a(la);eu=n(Fb,"Can be called as:"),Fb.forEach(t),tu=c(Le),To=s(Le,"UL",{});var Ni=a(To);pn=s(Ni,"LI",{});var q_=a(pn);ha=s(q_,"CODE",{});var Ib=a(ha);ou=n(Ib,"self.token_to_sequence(token_index)"),Ib.forEach(t),nu=n(q_," if batch size is 1"),q_.forEach(t),ru=c(Ni),mn=s(Ni,"LI",{});var D_=a(mn);pa=s(D_,"CODE",{});var Nb=a(pa);su=n(Nb,"self.token_to_sequence(batch_index, token_index)"),Nb.forEach(t),au=n(D_," if batch size is greater than 1"),D_.forEach(t),Ni.forEach(t),iu=c(Le),ma=s(Le,"P",{});var Ab=a(ma);du=n(Ab,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.`),Ab.forEach(t),Le.forEach(t),cu=c(P),X=s(P,"DIV",{class:!0});var Fe=a(X);p(yo.$$.fragment,Fe),lu=c(Fe),fa=s(Fe,"P",{});var Cb=a(fa);hu=n(Cb,"Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch."),Cb.forEach(t),pu=c(Fe),ua=s(Fe,"P",{});var Ob=a(ua);mu=n(Ob,"Can be called as:"),Ob.forEach(t),fu=c(Fe),wo=s(Fe,"UL",{});var Ai=a(wo);fn=s(Ai,"LI",{});var L_=a(fn);_a=s(L_,"CODE",{});var Sb=a(_a);uu=n(Sb,"self.token_to_word(token_index)"),Sb.forEach(t),_u=n(L_," if batch size is 1"),L_.forEach(t),gu=c(Ai),un=s(Ai,"LI",{});var F_=a(un);ga=s(F_,"CODE",{});var Bb=a(ga);ku=n(Bb,"self.token_to_word(batch_index, token_index)"),Bb.forEach(t),bu=n(F_," if batch size is greater than 1"),F_.forEach(t),Ai.forEach(t),vu=c(Fe),ka=s(Fe,"P",{});var Wb=a(ka);Tu=n(Wb,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),Wb.forEach(t),Fe.forEach(t),yu=c(P),_t=s(P,"DIV",{class:!0});var Ci=a(_t);p(xo.$$.fragment,Ci),wu=c(Ci),ba=s(Ci,"P",{});var Rb=a(ba);xu=n(Rb,`Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).`),Rb.forEach(t),Ci.forEach(t),zu=c(P),gt=s(P,"DIV",{class:!0});var Oi=a(gt);p(zo.$$.fragment,Oi),Eu=c(Oi),va=s(Oi,"P",{});var Ub=a(va);$u=n(Ub,"Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),Ub.forEach(t),Oi.forEach(t),Pu=c(P),S=s(P,"DIV",{class:!0});var ae=a(S);p(Eo.$$.fragment,ae),qu=c(ae),Ta=s(ae,"P",{});var Vb=a(Ta);Du=n(Vb,"Get the character span in the original string corresponding to given word in a sequence of the batch."),Vb.forEach(t),Lu=c(ae),ya=s(ae,"P",{});var Gb=a(ya);Fu=n(Gb,"Character spans are returned as a CharSpan NamedTuple with:"),Gb.forEach(t),Iu=c(ae),$o=s(ae,"UL",{});var Si=a($o);wa=s(Si,"LI",{});var jb=a(wa);Nu=n(jb,"start: index of the first character in the original string"),jb.forEach(t),Au=c(Si),xa=s(Si,"LI",{});var Hb=a(xa);Cu=n(Hb,"end: index of the character following the last character in the original string"),Hb.forEach(t),Si.forEach(t),Ou=c(ae),za=s(ae,"P",{});var Mb=a(za);Su=n(Mb,"Can be called as:"),Mb.forEach(t),Bu=c(ae),Po=s(ae,"UL",{});var Bi=a(Po);_n=s(Bi,"LI",{});var I_=a(_n);Ea=s(I_,"CODE",{});var Xb=a(Ea);Wu=n(Xb,"self.word_to_chars(word_index)"),Xb.forEach(t),Ru=n(I_," if batch size is 1"),I_.forEach(t),Uu=c(Bi),gn=s(Bi,"LI",{});var N_=a(gn);$a=s(N_,"CODE",{});var Yb=a($a);Vu=n(Yb,"self.word_to_chars(batch_index, word_index)"),Yb.forEach(t),Gu=n(N_," if batch size is greater or equal to 1"),N_.forEach(t),Bi.forEach(t),ae.forEach(t),ju=c(P),F=s(P,"DIV",{class:!0});var K=a(F);p(qo.$$.fragment,K),Hu=c(K),Pa=s(K,"P",{});var Jb=a(Pa);Mu=n(Jb,"Get the encoded token span corresponding to a word in a sequence of the batch."),Jb.forEach(t),Xu=c(K),Do=s(K,"P",{});var Wi=a(Do);Yu=n(Wi,"Token spans are returned as a "),kn=s(Wi,"A",{href:!0});var Kb=a(kn);Ju=n(Kb,"TokenSpan"),Kb.forEach(t),Ku=n(Wi," with:"),Wi.forEach(t),Qu=c(K),Lo=s(K,"UL",{});var Ri=a(Lo);bn=s(Ri,"LI",{});var A_=a(bn);qa=s(A_,"STRONG",{});var Qb=a(qa);Zu=n(Qb,"start"),Qb.forEach(t),e_=n(A_," \u2014 Index of the first token."),A_.forEach(t),t_=c(Ri),vn=s(Ri,"LI",{});var C_=a(vn);Da=s(C_,"STRONG",{});var Zb=a(Da);o_=n(Zb,"end"),Zb.forEach(t),n_=n(C_," \u2014 Index of the token following the last token."),C_.forEach(t),Ri.forEach(t),r_=c(K),La=s(K,"P",{});var ev=a(La);s_=n(ev,"Can be called as:"),ev.forEach(t),a_=c(K),Fo=s(K,"UL",{});var Ui=a(Fo);Tn=s(Ui,"LI",{});var O_=a(Tn);Fa=s(O_,"CODE",{});var tv=a(Fa);i_=n(tv,"self.word_to_tokens(word_index, sequence_index: int = 0)"),tv.forEach(t),d_=n(O_," if batch size is 1"),O_.forEach(t),c_=c(Ui),yn=s(Ui,"LI",{});var S_=a(yn);Ia=s(S_,"CODE",{});var ov=a(Ia);l_=n(ov,"self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)"),ov.forEach(t),h_=n(S_,` if batch size is greater or equal to 1`),S_.forEach(t),Ui.forEach(t),p_=c(K),Na=s(K,"P",{});var nv=a(Na);m_=n(nv,`This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.`),nv.forEach(t),K.forEach(t),f_=c(P),kt=s(P,"DIV",{class:!0});var Vi=a(kt);p(Io.$$.fragment,Vi),u_=c(Vi),Aa=s(Vi,"P",{});var rv=a(Aa);__=n(rv,"Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer."),rv.forEach(t),Vi.forEach(t),P.forEach(t),this.h()},h(){l(y,"name","hf:doc:metadata"),l(y,"content",JSON.stringify(bv)),l(E,"id","tokenizer"),l(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(E,"href","#tokenizer"),l(q,"class","relative group"),l(Tt,"href","https://github.com/huggingface/tokenizers"),l(Tt,"rel","nofollow"),l(Oo,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(So,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(Bo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Wo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.SpecialTokensMixin"),l(Ro,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(Uo,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(Vo,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding"),l(Go,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(yt,"href","https://github.com/huggingface/tokenizers"),l(yt,"rel","nofollow"),l(Xe,"id","transformers.PreTrainedTokenizer"),l(Xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Xe,"href","#transformers.PreTrainedTokenizer"),l(Ne,"class","relative group"),l(jo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Ho,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),l(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(Ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(Ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(et,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(tt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(g,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(nt,"id","transformers.PreTrainedTokenizerFast"),l(nt,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(nt,"href","#transformers.PreTrainedTokenizerFast"),l(Ce,"class","relative group"),l(Mo,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(Rt,"href","https://huggingface.co/docs/tokenizers"),l(Rt,"rel","nofollow"),l(Xo,"href","../fast_tokenizers"),l(Yo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(Jo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),l(st,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(at,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(dt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ct,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(lt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(pt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(mt,"id","transformers.BatchEncoding"),l(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(mt,"href","#transformers.BatchEncoding"),l(Oe,"class","relative group"),l(ao,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(Ko,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode_plus"),l(Qo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_encode_plus"),l(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ft,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ut,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(an,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.CharSpan"),l(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(_t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(gt,"class","docstring 
border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(kn,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.TokenSpan"),l(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(kt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(i,b){e(document.head,y),$(i,L,b),$(i,q,b),e(q,E),e(E,B),m(v,B,null),e(q,I),e(q,Bn),e(Bn,ji),$(i,Ua,b),$(i,He,b),e(He,Hi),e(He,Tt),e(Tt,Mi),e(He,Xi),$(i,Va,b),$(i,Me,b),e(Me,Wn),e(Wn,Yi),e(Me,Ji),e(Me,Rn),e(Rn,Ki),$(i,Ga,b),$(i,W,b),e(W,Qi),e(W,Oo),e(Oo,Zi),e(W,ed),e(W,So),e(So,td),e(W,od),e(W,Bo),e(Bo,nd),e(W,rd),e(W,Wo),e(Wo,sd),e(W,ad),$(i,ja,b),$(i,Ie,b),e(Ie,Ro),e(Ro,id),e(Ie,dd),e(Ie,Uo),e(Uo,cd),e(Ie,ld),$(i,Ha,b),$(i,me,b),e(me,Un),e(Un,hd),e(me,pd),e(me,Vn),e(Vn,md),e(me,fd),e(me,Gn),e(Gn,ud),$(i,Ma,b),$(i,D,b),e(D,Vo),e(Vo,_d),e(D,gd),e(D,Go),e(Go,kd),e(D,bd),e(D,jn),e(jn,vd),e(D,Td),e(D,Hn),e(Hn,yd),e(D,wd),e(D,Mn),e(Mn,xd),e(D,zd),e(D,Xn),e(Xn,Ed),e(D,$d),e(D,Yn),e(Yn,Pd),e(D,qd),e(D,yt),e(yt,Dd),e(D,Ld),$(i,Xa,b),$(i,Ne,b),e(Ne,Xe),e(Xe,Jn),m(wt,Jn,null),e(Ne,Fd),e(Ne,Kn),e(Kn,Id),$(i,Ya,b),$(i,g,b),m(xt,g,null),e(g,Nd),e(g,Qn),e(Qn,Ad),e(g,Cd),e(g,zt),e(zt,Od),e(zt,jo),e(jo,Sd),e(zt,Bd),e(g,Wd),e(g,Zn),e(Zn,Rd),e(g,Ud),e(g,er),e(er,Vd),e(g,Gd),e(g,tr),e(tr,jd),e(g,Hd),e(g,N),e(N,fe),e(fe,or),e(or,Md),e(fe,Xd),e(fe,nr),e(nr,Yd),e(fe,Jd),e(fe,rr),e(rr,Kd),e(fe,Qd),e(N,Zd),e(N,R),e(R,sr),e(sr,ec),e(R,tc),e(R,ar),e(ar,oc),e(R,nc),e(R,ir),e(ir,rc),e(R,sc),e(R,dr),e(dr,ac),e(R,ic),e(R,cr),e(cr,dc),e(R,cc),e(N,lc),e(N,Q),e(Q,lr),e(lr,hc),e(Q,pc),e(Q,hr),e(hr,mc),e(Q,fc),e(Q,pr),e(pr,uc),e(Q,_c),e(Q,mr),e(mr,gc),e(Q,kc),e(N,bc),e(N,U),e(U,fr),e(fr,vc),e(U,Tc),e(U,ur),e(ur,yc),e(U,wc),e(U,_r),e(_r,xc),e(U,zc),e(U,gr),e(gr,Ec),e(U,$c),e(U,Ho),e(Ho,Pc),e(U,qc),e(N,Dc),e(N,Ye),e(Ye,kr),e(kr,Lc),e(Ye,Fc),e(Ye,br),e(br,Ic),e(Ye,Nc),e(N,Ac),e(N,Z),e(Z,vr),e(vr,Cc),e(Z,Oc),e(Z,Tr),e(Tr,Sc),e(Z,Bc),e(Z,yr),e(yr,Wc),e(Z,Rc),e(Z,wr),e(wr,Uc),e(Z,Vc),e(N,Gc),e(N,ee),e(ee,xr),e(xr,jc),e(ee,Hc),e(ee,zr),e(zr,Mc),e(ee,Xc),e(ee,Er),e(Er,Yc),e(ee,Jc),e(ee,$r),e($r,Kc),e(ee,Qc),e(g,Zc),e(g,Je),m(Et,Je,null),e(Je,el),e(Je,Pr),e(Pr,tl),e(g,ol),e(g,Ke),m($t,Ke,null),e(Ke,nl),e(Ke,qr),e(qr,rl),e(g,sl),e(g,ue),m(Pt,ue,null),e(ue,al),e(ue,Dr),e(Dr,il),e(ue,dl),e(ue,qt),e(qt,cl),e(qt,Lr),e(Lr,ll),e(qt,hl),e(g,pl),e(g,_e),m(Dt,_e,null),e(_e,ml),e(_e,Fr),e(Fr,fl),e(_e,ul),e(_e,Lt),e(Lt,_l),e(Lt,Ir),e(Ir,gl),e(Lt,kl),e(g,bl),e(g,ge),m(Ft,ge,null),e(ge,vl),e(ge,It),e(It,Tl),e(It,Nr),e(Nr,yl),e(It,wl),e(ge,xl),m(Qe,ge,null),e(g,zl),e(g,Ze),m(Nt,Ze,null),e(Ze,El),e(Ze,Ar),e(Ar,$l),e(g,Pl),e(g,et),m(At,et,null),e(et,ql),e(et,Cr),e(Cr,Dl),e(g,Ll),e(g,tt),m(Ct,tt,null),e(tt,Fl),e(tt,Or),e(Or,Il),e(g,Nl),e(g,ke),m(Ot,ke,null),e(ke,Al),e(ke,Sr),e(Sr,Cl),e(ke,Ol),m(ot,ke,null),e(g,Sl),e(g,be),m(St,be,null),e(be,Bl),e(be,Br),e(Br,Wl),e(be,Rl),e(be,Ae),e(Ae,Ul),e(Ae,Wr),e(Wr,Vl),e(Ae,Gl),e(Ae,Rr),e(Rr,jl),e(Ae,Hl),e(g,Ml),e(g,ve),m(Bt,ve,null),e(ve,Xl),e(ve,Ur),e(Ur,Yl),e(ve,Jl),e(ve,Vr),e(Vr,Kl),$(i,Ja,b),$(i,Ce,b),e(Ce,nt),e(nt,Gr),m(Wt,Gr,null),e(Ce,Ql),e(Ce,jr),e(jr,Zl),$(i,Ka,b),$(i,te,b),e(te,eh),e(te,Mo),e(Mo,th),e(te,oh),e(te,Rt),e(Rt,nh),e(te,rh),e(te,Xo),e(Xo,sh),e(te,ah),$(i,Qa,b),$(i,k,b),m(Ut,k,null),e(k,ih),e(k,Hr),e(Hr,dh),e(k,ch),e(k,Vt),e(Vt,lh)
,e(Vt,Yo),e(Yo,hh),e(Vt,ph),e(k,mh),e(k,Mr),e(Mr,fh),e(k,uh),e(k,Xr),e(Xr,_h),e(k,gh),e(k,Yr),e(Yr,kh),e(k,bh),e(k,A),e(A,Te),e(Te,Jr),e(Jr,vh),e(Te,Th),e(Te,Kr),e(Kr,yh),e(Te,wh),e(Te,Qr),e(Qr,xh),e(Te,zh),e(A,Eh),e(A,V),e(V,Zr),e(Zr,$h),e(V,Ph),e(V,es),e(es,qh),e(V,Dh),e(V,ts),e(ts,Lh),e(V,Fh),e(V,os),e(os,Ih),e(V,Nh),e(V,ns),e(ns,Ah),e(V,Ch),e(A,Oh),e(A,oe),e(oe,rs),e(rs,Sh),e(oe,Bh),e(oe,ss),e(ss,Wh),e(oe,Rh),e(oe,as),e(as,Uh),e(oe,Vh),e(oe,is),e(is,Gh),e(oe,jh),e(A,Hh),e(A,G),e(G,ds),e(ds,Mh),e(G,Xh),e(G,cs),e(cs,Yh),e(G,Jh),e(G,ls),e(ls,Kh),e(G,Qh),e(G,hs),e(hs,Zh),e(G,ep),e(G,Jo),e(Jo,tp),e(G,op),e(A,np),e(A,rt),e(rt,ps),e(ps,rp),e(rt,sp),e(rt,ms),e(ms,ap),e(rt,ip),e(A,dp),e(A,ne),e(ne,fs),e(fs,cp),e(ne,lp),e(ne,us),e(us,hp),e(ne,pp),e(ne,_s),e(_s,mp),e(ne,fp),e(ne,gs),e(gs,up),e(ne,_p),e(A,gp),e(A,re),e(re,ks),e(ks,kp),e(re,bp),e(re,bs),e(bs,vp),e(re,Tp),e(re,vs),e(vs,yp),e(re,wp),e(re,Ts),e(Ts,xp),e(re,zp),e(k,Ep),e(k,st),m(Gt,st,null),e(st,$p),e(st,ys),e(ys,Pp),e(k,qp),e(k,at),m(jt,at,null),e(at,Dp),e(at,ws),e(ws,Lp),e(k,Fp),e(k,ye),m(Ht,ye,null),e(ye,Ip),e(ye,xs),e(xs,Np),e(ye,Ap),e(ye,Mt),e(Mt,Cp),e(Mt,zs),e(zs,Op),e(Mt,Sp),e(k,Bp),e(k,we),m(Xt,we,null),e(we,Wp),e(we,Es),e(Es,Rp),e(we,Up),e(we,Yt),e(Yt,Vp),e(Yt,$s),e($s,Gp),e(Yt,jp),e(k,Hp),e(k,xe),m(Jt,xe,null),e(xe,Mp),e(xe,Kt),e(Kt,Xp),e(Kt,Ps),e(Ps,Yp),e(Kt,Jp),e(xe,Kp),m(it,xe,null),e(k,Qp),e(k,dt),m(Qt,dt,null),e(dt,Zp),e(dt,qs),e(qs,em),e(k,tm),e(k,ct),m(Zt,ct,null),e(ct,om),e(ct,Ds),e(Ds,nm),e(k,rm),e(k,lt),m(eo,lt,null),e(lt,sm),e(lt,Ls),e(Ls,am),e(k,im),e(k,ze),m(to,ze,null),e(ze,dm),e(ze,Fs),e(Fs,cm),e(ze,lm),m(ht,ze,null),e(k,hm),e(k,Ee),m(oo,Ee,null),e(Ee,pm),e(Ee,Is),e(Is,mm),e(Ee,fm),e(Ee,Ns),e(Ns,um),e(k,_m),e(k,pt),m(no,pt,null),e(pt,gm),e(pt,As),e(As,km),$(i,Za,b),$(i,Oe,b),e(Oe,mt),e(mt,Cs),m(ro,Cs,null),e(Oe,bm),e(Oe,Os),e(Os,vm),$(i,ei,b),$(i,w,b),m(so,w,null),e(w,Tm),e(w,ie),e(ie,ym),e(ie,ao),e(ao,Ss),e(Ss,wm),e(ao,xm),e(ie,zm),e(ie,Ko),e(Ko,Em),e(ie,$m),e(ie,Qo),e(Qo,Pm),e(ie,qm),e(w,Dm),e(w,Bs),e(Bs,Lm),e(w,Fm),e(w,j),m(io,j,null),e(j,Im),e(j,Ws),e(Ws,Nm),e(j,Am),e(j,Rs),e(Rs,Cm),e(j,Om),e(j,co),e(co,Zo),e(Zo,Us),e(Us,Sm),e(Zo,Bm),e(co,Wm),e(co,en),e(en,Vs),e(Vs,Rm),e(en,Um),e(j,Vm),e(j,Gs),e(Gs,Gm),e(w,jm),e(w,H),m(lo,H,null),e(H,Hm),e(H,js),e(js,Mm),e(H,Xm),e(H,Hs),e(Hs,Ym),e(H,Jm),e(H,ho),e(ho,tn),e(tn,Ms),e(Ms,Km),e(tn,Qm),e(ho,Zm),e(ho,on),e(on,Xs),e(Xs,ef),e(on,tf),e(H,of),e(H,Ys),e(Ys,nf),e(w,rf),e(w,ft),m(po,ft,null),e(ft,sf),e(ft,Js),e(Js,af),e(w,df),e(w,$e),m(mo,$e,null),e($e,cf),e($e,Ks),e(Ks,lf),e($e,hf),e($e,Se),e(Se,nn),e(nn,Qs),e(Qs,pf),e(nn,mf),e(Se,ff),e(Se,rn),e(rn,Zs),e(Zs,uf),e(rn,_f),e(Se,gf),e(Se,sn),e(sn,ea),e(ea,kf),e(sn,bf),e(w,vf),e(w,ut),m(fo,ut,null),e(ut,Tf),e(ut,uo),e(uo,yf),e(uo,ta),e(ta,wf),e(uo,xf),e(w,zf),e(w,O),m(_o,O,null),e(O,Ef),e(O,oa),e(oa,$f),e(O,Pf),e(O,go),e(go,qf),e(go,an),e(an,Df),e(go,Lf),e(O,Ff),e(O,ko),e(ko,dn),e(dn,na),e(na,If),e(dn,Nf),e(ko,Af),e(ko,cn),e(cn,ra),e(ra,Cf),e(cn,Of),e(O,Sf),e(O,sa),e(sa,Bf),e(O,Wf),e(O,bo),e(bo,ln),e(ln,aa),e(aa,Rf),e(ln,Uf),e(bo,Vf),e(bo,hn),e(hn,ia),e(ia,Gf),e(hn,jf),e(w,Hf),e(w,M),m(vo,M,null),e(M,Mf),e(M,Be),e(Be,Xf),e(Be,da),e(da,Yf),e(Be,Jf),e(Be,ca),e(ca,Kf),e(Be,Qf),e(M,Zf),e(M,la),e(la,eu),e(M,tu),e(M,To),e(To,pn),e(pn,ha),e(ha,ou),e(pn,nu),e(To,ru),e(To,mn),e(mn,pa),e(pa,su),e(mn,au),e(M,iu),e(M,ma),e(ma,du),e(w,cu),e(w,X),m(yo,X,null),e(X,lu),e(X,fa),e(fa,hu),e(X,pu),e(X,ua),e(ua,mu),e(X,fu),e(X,wo),e(wo,fn),e(fn,_a),e(_a,uu),e(fn,_u),e(wo,gu),e(wo,un),e(un,ga),e(ga,ku),e(un,bu),e(X,vu),e(X,ka),e(ka,Tu),e(w,yu),e(w
,_t),m(xo,_t,null),e(_t,wu),e(_t,ba),e(ba,xu),e(w,zu),e(w,gt),m(zo,gt,null),e(gt,Eu),e(gt,va),e(va,$u),e(w,Pu),e(w,S),m(Eo,S,null),e(S,qu),e(S,Ta),e(Ta,Du),e(S,Lu),e(S,ya),e(ya,Fu),e(S,Iu),e(S,$o),e($o,wa),e(wa,Nu),e($o,Au),e($o,xa),e(xa,Cu),e(S,Ou),e(S,za),e(za,Su),e(S,Bu),e(S,Po),e(Po,_n),e(_n,Ea),e(Ea,Wu),e(_n,Ru),e(Po,Uu),e(Po,gn),e(gn,$a),e($a,Vu),e(gn,Gu),e(w,ju),e(w,F),m(qo,F,null),e(F,Hu),e(F,Pa),e(Pa,Mu),e(F,Xu),e(F,Do),e(Do,Yu),e(Do,kn),e(kn,Ju),e(Do,Ku),e(F,Qu),e(F,Lo),e(Lo,bn),e(bn,qa),e(qa,Zu),e(bn,e_),e(Lo,t_),e(Lo,vn),e(vn,Da),e(Da,o_),e(vn,n_),e(F,r_),e(F,La),e(La,s_),e(F,a_),e(F,Fo),e(Fo,Tn),e(Tn,Fa),e(Fa,i_),e(Tn,d_),e(Fo,c_),e(Fo,yn),e(yn,Ia),e(Ia,l_),e(yn,h_),e(F,p_),e(F,Na),e(Na,m_),e(w,f_),e(w,kt),m(Io,kt,null),e(kt,u_),e(kt,Aa),e(Aa,__),ti=!0},p(i,[b]){const No={};b&2&&(No.$$scope={dirty:b,ctx:i}),Qe.$set(No);const Ca={};b&2&&(Ca.$$scope={dirty:b,ctx:i}),ot.$set(Ca);const Oa={};b&2&&(Oa.$$scope={dirty:b,ctx:i}),it.$set(Oa);const Sa={};b&2&&(Sa.$$scope={dirty:b,ctx:i}),ht.$set(Sa)},i(i){ti||(f(v.$$.fragment,i),f(wt.$$.fragment,i),f(xt.$$.fragment,i),f(Et.$$.fragment,i),f($t.$$.fragment,i),f(Pt.$$.fragment,i),f(Dt.$$.fragment,i),f(Ft.$$.fragment,i),f(Qe.$$.fragment,i),f(Nt.$$.fragment,i),f(At.$$.fragment,i),f(Ct.$$.fragment,i),f(Ot.$$.fragment,i),f(ot.$$.fragment,i),f(St.$$.fragment,i),f(Bt.$$.fragment,i),f(Wt.$$.fragment,i),f(Ut.$$.fragment,i),f(Gt.$$.fragment,i),f(jt.$$.fragment,i),f(Ht.$$.fragment,i),f(Xt.$$.fragment,i),f(Jt.$$.fragment,i),f(it.$$.fragment,i),f(Qt.$$.fragment,i),f(Zt.$$.fragment,i),f(eo.$$.fragment,i),f(to.$$.fragment,i),f(ht.$$.fragment,i),f(oo.$$.fragment,i),f(no.$$.fragment,i),f(ro.$$.fragment,i),f(so.$$.fragment,i),f(io.$$.fragment,i),f(lo.$$.fragment,i),f(po.$$.fragment,i),f(mo.$$.fragment,i),f(fo.$$.fragment,i),f(_o.$$.fragment,i),f(vo.$$.fragment,i),f(yo.$$.fragment,i),f(xo.$$.fragment,i),f(zo.$$.fragment,i),f(Eo.$$.fragment,i),f(qo.$$.fragment,i),f(Io.$$.fragment,i),ti=!0)},o(i){u(v.$$.fragment,i),u(wt.$$.fragment,i),u(xt.$$.fragment,i),u(Et.$$.fragment,i),u($t.$$.fragment,i),u(Pt.$$.fragment,i),u(Dt.$$.fragment,i),u(Ft.$$.fragment,i),u(Qe.$$.fragment,i),u(Nt.$$.fragment,i),u(At.$$.fragment,i),u(Ct.$$.fragment,i),u(Ot.$$.fragment,i),u(ot.$$.fragment,i),u(St.$$.fragment,i),u(Bt.$$.fragment,i),u(Wt.$$.fragment,i),u(Ut.$$.fragment,i),u(Gt.$$.fragment,i),u(jt.$$.fragment,i),u(Ht.$$.fragment,i),u(Xt.$$.fragment,i),u(Jt.$$.fragment,i),u(it.$$.fragment,i),u(Qt.$$.fragment,i),u(Zt.$$.fragment,i),u(eo.$$.fragment,i),u(to.$$.fragment,i),u(ht.$$.fragment,i),u(oo.$$.fragment,i),u(no.$$.fragment,i),u(ro.$$.fragment,i),u(so.$$.fragment,i),u(io.$$.fragment,i),u(lo.$$.fragment,i),u(po.$$.fragment,i),u(mo.$$.fragment,i),u(fo.$$.fragment,i),u(_o.$$.fragment,i),u(vo.$$.fragment,i),u(yo.$$.fragment,i),u(xo.$$.fragment,i),u(zo.$$.fragment,i),u(Eo.$$.fragment,i),u(qo.$$.fragment,i),u(Io.$$.fragment,i),ti=!1},d(i){t(y),i&&t(L),i&&t(q),_(v),i&&t(Ua),i&&t(He),i&&t(Va),i&&t(Me),i&&t(Ga),i&&t(W),i&&t(ja),i&&t(Ie),i&&t(Ha),i&&t(me),i&&t(Ma),i&&t(D),i&&t(Xa),i&&t(Ne),_(wt),i&&t(Ya),i&&t(g),_(xt),_(Et),_($t),_(Pt),_(Dt),_(Ft),_(Qe),_(Nt),_(At),_(Ct),_(Ot),_(ot),_(St),_(Bt),i&&t(Ja),i&&t(Ce),_(Wt),i&&t(Ka),i&&t(te),i&&t(Qa),i&&t(k),_(Ut),_(Gt),_(jt),_(Ht),_(Xt),_(Jt),_(it),_(Qt),_(Zt),_(eo),_(to),_(ht),_(oo),_(no),i&&t(Za),i&&t(Oe),_(ro),i&&t(ei),i&&t(w),_(so),_(io),_(lo),_(po),_(mo),_(fo),_(_o),_(vo),_(yo),_(xo),_(zo),_(Eo),_(qo),_(Io)}}}const 
bv={local:"tokenizer",sections:[{local:"transformers.PreTrainedTokenizer",title:"PreTrainedTokenizer"},{local:"transformers.PreTrainedTokenizerFast",title:"PreTrainedTokenizerFast"},{local:"transformers.BatchEncoding",title:"BatchEncoding"}],title:"Tokenizer"};function vv(pe){return mv(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class $v extends cv{constructor(y){super();lv(this,y,vv,kv,hv,{})}}export{$v as default,bv as metadata};
15
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/trainer.mdx-hf-doc-builder.js
import{S as eM,i as tM,s as oM,e as n,k as l,w as h,t as r,M as rM,c as s,d as o,m as d,a as i,x as u,h as a,b as m,G as e,g as p,y as f,q as g,o as _,B as v,v as aM,L as nM}from"../../chunks/vendor-hf-doc-builder.js";import{T as gp}from"../../chunks/Tip-hf-doc-builder.js";import{D as x}from"../../chunks/Docstring-hf-doc-builder.js";import{C as O}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Y}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as sM}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function iM(Z){let T,D,$,k,L,A,S,W,fe,oe,G,se,ie,re,le,H,Ze,ge,z,I,st,ae,it,lt,_e,Ia,Ua,Ke,Pe,Na,ve,za,Fa;return{c(){T=n("p"),D=r("The "),$=n("a"),k=r("Trainer"),L=r(` class is optimized for \u{1F917} Transformers models and can have surprising behaviors when you use it on other models. When using it on your own model, make sure:`),A=l(),S=n("ul"),W=n("li"),fe=r("your model always return tuples or subclasses of "),oe=n("a"),G=r("ModelOutput"),se=r("."),ie=l(),re=n("li"),le=r("your model can compute the loss if a "),H=n("code"),Ze=r("labels"),ge=r(` argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples)`),z=l(),I=n("li"),st=r("your model can accept multiple label arguments (use the "),ae=n("code"),it=r("label_names"),lt=r(" in your "),_e=n("a"),Ia=r("TrainingArguments"),Ua=r(" to indicate their name to the "),Ke=n("a"),Pe=r("Trainer"),Na=r(") but none of them should be named "),ve=n("code"),za=r('"label"'),Fa=r("."),this.h()},l(K){T=s(K,"P",{});var B=i(T);D=a(B,"The "),$=s(B,"A",{href:!0});var el=i($);k=a(el,"Trainer"),el.forEach(o),L=a(B,` class is optimized for \u{1F917} Transformers models and can have surprising behaviors when you use it on other models. When using it on your own model, make sure:`),B.forEach(o),A=d(K),S=s(K,"UL",{});var be=i(S);W=s(be,"LI",{});var Xo=i(W);fe=a(Xo,"your model always return tuples or subclasses of "),oe=s(Xo,"A",{href:!0});var tl=i(oe);G=a(tl,"ModelOutput"),tl.forEach(o),se=a(Xo,"."),Xo.forEach(o),ie=d(be),re=s(be,"LI",{});var ro=i(re);le=a(ro,"your model can compute the loss if a "),H=s(ro,"CODE",{});var C=i(H);Ze=a(C,"labels"),C.forEach(o),ge=a(ro,` argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples)`),ro.forEach(o),z=d(be),I=s(be,"LI",{});var V=i(I);st=a(V,"your model can accept multiple label arguments (use the "),ae=s(V,"CODE",{});var Qo=i(ae);it=a(Qo,"label_names"),Qo.forEach(o),lt=a(V," in your "),_e=s(V,"A",{href:!0});var ol=i(_e);Ia=a(ol,"TrainingArguments"),ol.forEach(o),Ua=a(V," to indicate their name to the "),Ke=s(V,"A",{href:!0});var rl=i(Ke);Pe=a(rl,"Trainer"),rl.forEach(o),Na=a(V,") but none of them should be named "),ve=s(V,"CODE",{});var al=i(ve);za=a(al,'"label"'),al.forEach(o),Fa=a(V,"."),V.forEach(o),be.forEach(o),this.h()},h(){m($,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(oe,"href","/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput"),m(_e,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(Ke,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer")},m(K,B){p(K,T,B),e(T,D),e(T,$),e($,k),e(T,L),p(K,A,B),p(K,S,B),e(S,W),e(W,fe),e(W,oe),e(oe,G),e(W,se),e(S,ie),e(S,re),e(re,le),e(re,H),e(H,Ze),e(re,ge),e(S,z),e(S,I),e(I,st),e(I,ae),e(ae,it),e(I,lt),e(I,_e),e(_e,Ia),e(I,Ua),e(I,Ke),e(Ke,Pe),e(I,Na),e(I,ve),e(ve,za),e(I,Fa)},d(K){K&&o(T),K&&o(A),K&&o(S)}}}function 
lM(Z){let T,D,$,k,L,A,S,W,fe,oe,G,se,ie,re,le,H,Ze;return{c(){T=n("p"),D=r("To use this method, you need to have provided a "),$=n("code"),k=r("model_init"),L=r(" when initializing your "),A=n("a"),S=r("Trainer"),W=r(`: we need to reinitialize the model at each new run. This is incompatible with the `),fe=n("code"),oe=r("optimizers"),G=r(` argument, so you need to subclass `),se=n("a"),ie=r("Trainer"),re=r(" and override the method "),le=n("a"),H=r("create_optimizer_and_scheduler()"),Ze=r(` for custom optimizer/scheduler.`),this.h()},l(ge){T=s(ge,"P",{});var z=i(T);D=a(z,"To use this method, you need to have provided a "),$=s(z,"CODE",{});var I=i($);k=a(I,"model_init"),I.forEach(o),L=a(z," when initializing your "),A=s(z,"A",{href:!0});var st=i(A);S=a(st,"Trainer"),st.forEach(o),W=a(z,`: we need to reinitialize the model at each new run. This is incompatible with the `),fe=s(z,"CODE",{});var ae=i(fe);oe=a(ae,"optimizers"),ae.forEach(o),G=a(z,` argument, so you need to subclass `),se=s(z,"A",{href:!0});var it=i(se);ie=a(it,"Trainer"),it.forEach(o),re=a(z," and override the method "),le=s(z,"A",{href:!0});var lt=i(le);H=a(lt,"create_optimizer_and_scheduler()"),lt.forEach(o),Ze=a(z,` for custom optimizer/scheduler.`),z.forEach(o),this.h()},h(){m(A,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(se,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(le,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.create_optimizer_and_scheduler")},m(ge,z){p(ge,T,z),e(T,D),e(T,$),e($,k),e(T,L),e(T,A),e(A,S),e(T,W),e(T,fe),e(fe,oe),e(T,G),e(T,se),e(se,ie),e(T,re),e(T,le),e(le,H),e(T,Ze)},d(ge){ge&&o(T)}}}function dM(Z){let T,D,$,k,L;return k=new O({props:{code:`init_mem_cpu_alloc_delta = 1301MB init_mem_cpu_peaked_delta = 154MB init_mem_gpu_alloc_delta = 230MB init_mem_gpu_peaked_delta = 0MB train_mem_cpu_alloc_delta = 1345MB train_mem_cpu_peaked_delta = 0MB train_mem_gpu_alloc_delta = 693MB train_mem_gpu_peaked_delta = 7MB`,highlighted:`<span class="hljs-attr">init_mem_cpu_alloc_delta</span> = <span class="hljs-number">1301</span>MB <span class="hljs-attr">init_mem_cpu_peaked_delta</span> = <span class="hljs-number">154</span>MB <span class="hljs-attr">init_mem_gpu_alloc_delta</span> = <span class="hljs-number">230</span>MB <span class="hljs-attr">init_mem_gpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_cpu_alloc_delta</span> = <span class="hljs-number">1345</span>MB <span class="hljs-attr">train_mem_cpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_gpu_alloc_delta</span> = <span class="hljs-number">693</span>MB <span class="hljs-attr">train_mem_gpu_peaked_delta</span> = <span class="hljs-number">7</span>MB`}}),{c(){T=n("p"),D=r("Now when this method is run, you will see a report that will include: :"),$=l(),h(k.$$.fragment)},l(A){T=s(A,"P",{});var S=i(T);D=a(S,"Now when this method is run, you will see a report that will include: :"),S.forEach(o),$=d(A),u(k.$$.fragment,A)},m(A,S){p(A,T,S),e(T,D),p(A,$,S),f(k,A,S),L=!0},p:nM,i(A){L||(g(k.$$.fragment,A),L=!0)},o(A){_(k.$$.fragment,A),L=!1},d(A){A&&o(T),A&&o($),v(k,A)}}}function cM(Z){let T,D;return{c(){T=n("p"),D=r(`If your predictions or labels have different sequence length (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. 
The padding index is -100.`)},l($){T=s($,"P",{});var k=i(T);D=a(k,`If your predictions or labels have different sequence length (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`),k.forEach(o)},m($,k){p($,T,k),e(T,D)},d($){$&&o(T)}}}function pM(Z){let T,D;return{c(){T=n("p"),D=r(`If your predictions or labels have different sequence lengths (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`)},l($){T=s($,"P",{});var k=i(T);D=a(k,`If your predictions or labels have different sequence lengths (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`),k.forEach(o)},m($,k){p($,T,k),e(T,D)},d($){$&&o(T)}}}function mM(Z){let T,D;return{c(){T=n("p"),D=r("This integration is not supported anymore, we recommend you either use DeepSpeed or PyTorch FSDP.")},l($){T=s($,"P",{});var k=i(T);D=a(k,"This integration is not supported anymore, we recommend you either use DeepSpeed or PyTorch FSDP."),k.forEach(o)},m($,k){p($,T,k),e(T,D)},d($){$&&o(T)}}}function hM(Z){let T,D,$,k,L;return{c(){T=n("p"),D=r(`We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. It has major fixes related to model correctness and performance improvements for transformer based models. Please refer to `),$=n("a"),k=r("https://github.com/pytorch/pytorch/issues/82707"),L=r(" for more details."),this.h()},l(A){T=s(A,"P",{});var S=i(T);D=a(S,`We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. It has major fixes related to model correctness and performance improvements for transformer based models. 
Please refer to `),$=s(S,"A",{href:!0,rel:!0});var W=i($);k=a(W,"https://github.com/pytorch/pytorch/issues/82707"),W.forEach(o),L=a(S," for more details."),S.forEach(o),this.h()},h(){m($,"href","https://github.com/pytorch/pytorch/issues/82707"),m($,"rel","nofollow")},m(A,S){p(A,T,S),e(T,D),e(T,$),e($,k),e(T,L)},d(A){A&&o(T)}}}function uM(Z){let T,D,$,k,L,A,S,W,fe,oe,G,se,ie,re,le,H,Ze,ge,z,I,st,ae,it,lt,_e,Ia,Ua,Ke,Pe,Na,ve,za,Fa,K,B,el,be,Xo,tl,ro,C,V,Qo,ol,rl,al,nl,_p,Aw,Pw,Dw,sl,vp,Sw,qw,Ow,il,bp,Cw,Iw,Uw,dt,yp,Nw,zw,wp,Fw,Lw,Tp,Rw,Ww,Gw,ll,Ep,Mw,jw,Hw,dl,$p,Bw,Vw,Yw,cl,xp,Zw,Kw,Jw,pl,kp,Xw,Qw,e0,ml,Ap,t0,o0,r0,hl,Pp,a0,n0,s0,ul,Dp,i0,l0,yv,er,wv,tr,d0,fl,c0,p0,Tv,La,Ev,ct,m0,gl,h0,u0,_l,f0,g0,$v,ao,or,Sp,Ra,_0,qp,v0,xv,b,Wa,b0,Op,y0,w0,Cp,T0,E0,ye,rr,Ip,$0,x0,vl,k0,A0,P0,J,Up,D0,S0,Np,q0,O0,zp,C0,I0,Fp,U0,N0,Lp,z0,F0,Rp,L0,R0,W0,bl,Wp,G0,M0,j0,De,Gp,H0,B0,Mp,V0,Y0,jp,Z0,K0,Hp,J0,X0,Q0,Se,Bp,eT,tT,Vp,oT,rT,Yp,aT,nT,Zp,sT,iT,lT,ar,Ga,dT,Ma,cT,Kp,pT,mT,hT,nr,ja,uT,Ha,fT,Jp,gT,_T,vT,pt,Ba,bT,Xp,yT,wT,Qp,TT,ET,sr,Va,$T,em,xT,kT,ir,Ya,AT,Za,PT,tm,DT,ST,qT,mt,Ka,OT,om,CT,IT,Ja,UT,rm,NT,zT,FT,ht,Xa,LT,am,RT,WT,Je,GT,nm,MT,jT,sm,HT,BT,im,VT,YT,ZT,lr,Qa,KT,lm,JT,XT,qe,en,QT,dm,e4,t4,tn,o4,cm,r4,a4,n4,pm,s4,i4,ut,on,l4,no,d4,mm,c4,p4,hm,m4,h4,u4,um,f4,g4,dr,rn,_4,an,v4,yl,b4,y4,w4,ft,nn,T4,sn,E4,fm,$4,x4,k4,gm,A4,P4,cr,ln,D4,_m,S4,q4,gt,dn,O4,cn,C4,vm,I4,U4,N4,bm,z4,F4,Oe,pn,L4,mn,R4,ym,W4,G4,M4,so,j4,wm,H4,B4,Tm,V4,Y4,Z4,Em,K4,J4,_t,hn,X4,we,Q4,$m,eE,tE,xm,oE,rE,km,aE,nE,Am,sE,iE,lE,pr,dE,mr,un,cE,fn,pE,Pm,mE,hE,uE,hr,gn,fE,Dm,gE,_E,ur,_n,vE,vn,bE,Sm,yE,wE,TE,vt,bn,EE,yn,$E,qm,xE,kE,AE,Om,PE,DE,P,wn,SE,Cm,qE,OE,Im,CE,IE,Um,UE,NE,io,zE,Nm,FE,LE,zm,RE,WE,GE,fr,ME,Fm,Lm,jE,HE,Xe,Te,BE,Rm,VE,YE,Wm,ZE,KE,Gm,JE,XE,Mm,QE,e9,t9,lo,o9,jm,r9,a9,Hm,n9,s9,i9,wl,Bm,l9,d9,c9,bt,Vm,p9,m9,Ym,h9,u9,Zm,f9,g9,_9,Km,v9,b9,Jm,y9,w9,Tn,T9,Xm,E9,$9,x9,Qe,k9,Qm,A9,P9,eh,D9,S9,th,q9,O9,C9,ne,I9,Tl,U9,N9,oh,z9,F9,rh,L9,R9,ah,W9,G9,nh,M9,j9,H9,U,B9,sh,V9,Y9,ih,Z9,K9,lh,J9,X9,dh,Q9,e$,En,t$,o$,ch,r$,a$,ph,n$,s$,mh,i$,l$,hh,d$,c$,uh,p$,m$,h$,Ee,u$,El,f$,g$,fh,_$,v$,$l,b$,y$,gh,w$,T$,E$,_h,$$,x$,gr,$n,k$,vh,A$,P$,_r,xn,D$,kn,S$,bh,q$,O$,C$,yt,An,I$,Pn,U$,yh,N$,z$,F$,Dn,L$,wh,R$,W$,G$,X,Sn,M$,Th,j$,H$,qn,B$,Eh,V$,Y$,Z$,vr,K$,On,J$,$h,X$,Q$,e3,co,po,t3,xh,o3,r3,kh,a3,n3,s3,mo,i3,Ah,l3,d3,Ph,c3,p3,m3,ho,h3,Dh,u3,f3,Sh,g3,_3,v3,wt,Cn,b3,uo,y3,qh,w3,T3,Oh,E3,$3,x3,Ch,k3,A3,Tt,In,P3,fo,D3,Ih,S3,q3,Uh,O3,C3,I3,Nh,U3,N3,br,Un,z3,et,F3,zh,L3,R3,Fh,W3,G3,Lh,M3,j3,H3,yr,Nn,B3,zn,V3,Rh,Y3,Z3,K3,Ce,Fn,J3,Ln,X3,Wh,Q3,ex,tx,Gh,ox,rx,Rn,ax,xl,nx,sx,ix,Et,Wn,lx,Gn,dx,Mh,cx,px,mx,jh,hx,ux,$t,Mn,fx,Hh,gx,_x,Bh,vx,bx,wr,jn,yx,Hn,wx,Vh,Tx,Ex,$x,Tr,Bn,xx,Yh,kx,Ax,xt,Vn,Px,Zh,Dx,Sx,Kh,qx,kv,go,Er,Jh,Yn,Ox,Xh,Cx,Av,tt,Zn,Ix,Ie,Kn,Ux,Qh,Nx,zx,Jn,Fx,eu,Lx,Rx,Wx,tu,Gx,Mx,Q,Xn,jx,ou,Hx,Bx,Qn,Vx,ru,Yx,Zx,Kx,$r,Jx,es,Xx,au,Qx,ek,tk,_o,vo,ok,nu,rk,ak,su,nk,sk,ik,bo,lk,iu,dk,ck,lu,pk,mk,hk,yo,uk,du,fk,gk,cu,_k,vk,Pv,wo,xr,pu,ts,bk,mu,yk,Dv,F,os,wk,rs,Tk,hu,Ek,$k,xk,To,kk,kl,Ak,Pk,as,Dk,Sk,qk,de,ns,Ok,uu,Ck,Ik,Eo,Uk,fu,Nk,zk,gu,Fk,Lk,Rk,$o,Wk,_u,Gk,Mk,vu,jk,Hk,Bk,ss,Vk,bu,Yk,Zk,Kk,kr,is,Jk,yu,Xk,Qk,kt,ls,e5,wu,t5,o5,xo,r5,Tu,a5,n5,Eu,s5,i5,l5,Ar,ds,d5,cs,c5,$u,p5,m5,h5,Pr,ps,u5,xu,f5,g5,Dr,ms,_5,ku,v5,Sv,ko,Sr,Au,hs,b5,Pu,y5,qv,ot,us,w5,fs,T5,Du,E5,$5,x5,Ao,k5,Al,A5,P5,gs,D5,S5,Ov,Po,qr,Su,_s,q5,qu,O5,Cv,ce,C5,Pl,I5,U5,Ou,N5,z5,Dl,F5,L5,Cu,R5,W5,Iv,Or,G5,Sl,M5,j5,Uv,Cr,ql,Iu,H5,B5,V5,Ol,Uu,Y5,Z5,Nv,Ue,K5,Nu,J5,X5,zu,Q5,e6,Cl,t6,o6,zv,Ir,Ur,Fu,r6,a6,Lu,n6,s6,i6,Il,Ru,l6,d6,Fv,Do,Nr,Wu,vs,c6,Gu,p6,Lv,Ne,m6,Ul,h6,u6,Mu,f6,g6,ju,_6,v6,Rv,At,b6,Hu,y6,w6,Nl,T6,E6,
Wv,zr,zl,Bu,$6,x6,k6,Fl,Vu,A6,P6,Gv,ze,D6,Ll,S6,q6,Yu,O6,C6,Zu,I6,U6,Mv,ee,N6,Rl,z6,F6,Ku,L6,R6,Ju,W6,G6,Xu,M6,j6,Wl,H6,B6,jv,Gl,V6,Hv,bs,Bv,Ml,Y6,Vv,ys,Yv,jl,Z6,Zv,ws,Kv,Hl,K6,Jv,Bl,J6,Xv,Ts,Qv,Fr,X6,Qu,Q6,eA,e1,So,Lr,ef,Es,tA,tf,oA,t1,pe,rA,Vl,aA,nA,of,sA,iA,rf,lA,dA,af,cA,pA,o1,Pt,mA,$s,hA,uA,nf,fA,gA,r1,qo,Rr,sf,xs,_A,lf,vA,a1,Yl,bA,n1,Wr,yA,ks,df,wA,TA,s1,As,i1,Dt,EA,Ps,cf,$A,xA,Ds,pf,kA,AA,l1,Ss,d1,qs,c1,Gr,PA,Zl,DA,SA,p1,Kl,qA,m1,Jl,OA,h1,Xl,mf,hf,CA,u1,Mr,IA,uf,UA,NA,f1,Ql,zA,g1,Os,_1,St,FA,ff,LA,RA,gf,WA,GA,v1,ed,MA,b1,Cs,y1,qt,jA,_f,HA,BA,vf,VA,YA,w1,Ot,ZA,bf,KA,JA,Is,yf,XA,QA,T1,Us,E1,td,e8,$1,Ns,x1,od,t8,k1,zs,A1,rd,o8,P1,ad,wf,Tf,r8,D1,jr,a8,Ef,n8,s8,S1,nd,Fs,i8,$f,l8,d8,q1,Ls,O1,Rs,xf,c8,C1,Ws,I1,Ct,p8,kf,m8,h8,Af,u8,f8,U1,sd,g8,N1,Gs,z1,id,_8,F1,Hr,v8,Pf,b8,y8,L1,Oo,Br,Df,Ms,w8,Sf,T8,R1,Vr,E8,ld,$8,x8,W1,me,k8,js,A8,P8,Hs,D8,S8,Bs,q8,O8,Vs,C8,I8,G1,Yr,U8,dd,N8,z8,M1,cd,j1,Co,Zr,qf,Ys,F8,Of,L8,H1,pd,R8,B1,It,W8,Zs,G8,M8,Ks,j8,H8,V1,md,B8,Y1,Js,Z1,hd,V8,K1,Ut,Y8,Cf,Z8,K8,If,J8,X8,J1,Io,Kr,Uf,Xs,Q8,Nf,eP,X1,ud,tP,Q1,Fe,oP,zf,rP,aP,Ff,nP,sP,Lf,iP,lP,eb,Nt,dP,Rf,cP,pP,Wf,mP,hP,tb,Qs,ob,Jr,uP,ei,fP,gP,rb,Uo,Xr,Gf,ti,_P,Mf,vP,ab,fd,bP,nb,oi,sb,zt,yP,jf,wP,TP,Hf,EP,$P,ib,gd,xP,lb,ri,db,_d,kP,cb,Qr,AP,Bf,PP,DP,pb,rt,Vf,SP,qP,Yf,OP,CP,Zf,IP,UP,mb,vd,NP,hb,ai,ub,bd,zP,fb,Le,FP,Kf,LP,RP,Jf,WP,GP,Xf,MP,jP,gb,No,ea,Qf,ni,HP,eg,BP,_b,Ft,VP,tg,YP,ZP,og,KP,JP,vb,yd,XP,bb,wd,QP,yb,ta,eD,rg,tD,oD,wb,si,Tb,M,rD,ag,aD,nD,ng,sD,iD,sg,lD,dD,ig,cD,pD,lg,mD,hD,dg,uD,fD,Eb,Td,gD,$b,zo,oa,cg,ii,_D,pg,vD,xb,ra,kb,Re,bD,li,yD,wD,Ed,TD,ED,di,$D,xD,Ab,We,mg,kD,AD,hg,PD,DD,ug,SD,qD,fg,OD,Pb,$d,CD,Db,ci,gg,ID,UD,Sb,xd,ND,qb,pi,Ob,Lt,zD,_g,FD,LD,vg,RD,WD,Cb,mi,Ib,Rt,GD,bg,MD,jD,hi,HD,BD,Ub,aa,VD,kd,YD,ZD,Nb,Ad,KD,zb,ui,yg,JD,XD,Fb,fi,Lb,Pd,QD,Rb,gi,Wb,_i,wg,eS,tS,Gb,vi,Mb,Dd,oS,jb,bi,Hb,Sd,rS,Bb,na,aS,yi,nS,sS,Vb,wi,Tg,iS,lS,Yb,Wt,dS,Eg,cS,pS,$g,mS,hS,Zb,sa,uS,xg,fS,gS,Kb,Ti,Jb,qd,_S,Xb,Ge,kg,vS,bS,Ag,yS,wS,Ei,TS,Pg,ES,$S,xS,$i,kS,Dg,AS,PS,Qb,xi,at,DS,Sg,SS,qS,qg,OS,CS,Og,IS,US,ey,ia,NS,Cg,zS,FS,ty,ki,oy,Fo,Ig,LS,RS,Ug,WS,GS,ry,Gt,MS,Ng,jS,HS,zg,BS,VS,ay,Od,YS,ny,he,Fg,ZS,KS,Lg,JS,XS,Ai,QS,Rg,eq,tq,oq,Lo,rq,Wg,aq,nq,Gg,sq,iq,lq,Mg,dq,sy,Cd,cq,iy,la,Ro,pq,jg,mq,hq,Hg,uq,fq,gq,$e,_q,Bg,vq,bq,Vg,yq,wq,Yg,Tq,Eq,Zg,$q,xq,ly,Wo,da,Kg,Pi,kq,Jg,Aq,dy,ca,Pq,Di,Dq,Sq,cy,Si,Xg,qq,Oq,py,qi,Qg,Cq,Iq,my,te,e_,Oi,Uq,t_,Nq,zq,Fq,Ci,Id,o_,Lq,Rq,Wq,Go,Ii,Gq,r_,Mq,jq,Hq,Ui,Bq,a_,Vq,Yq,Zq,Ni,Kq,n_,Jq,Xq,Qq,s_,Mo,e7,i_,t7,o7,l_,r7,a7,n7,d_,nt,s7,c_,i7,l7,p_,d7,c7,m_,p7,m7,h7,h_,jo,u7,u_,f7,g7,f_,_7,v7,b7,zi,g_,y7,w7,Fi,xe,T7,__,E7,$7,v_,x7,k7,b_,A7,P7,y_,D7,S7,q7,Li,O7,w_,C7,I7,hy,Ud,T_,U7,uy,pa,Ri,N7,Wi,z7,F7,L7,Ho,R7,Gi,W7,G7,E_,M7,j7,fy,Bo,ma,$_,Mi,H7,x_,B7,gy,Me,V7,k_,Y7,Z7,ji,K7,J7,Hi,X7,Q7,_y,ha,vy,Nd,A_,eO,by,Mt,P_,tO,oO,D_,rO,aO,S_,nO,yy,Vo,q_,sO,iO,Bi,lO,dO,wy,Yo,O_,cO,pO,C_,mO,hO,Ty,Vi,Ey,zd,I_,uO,$y,ua,Yi,fO,U_,gO,_O,vO,ke,bO,N_,yO,wO,z_,TO,EO,F_,$O,xO,L_,kO,AO,xy,jt,PO,R_,DO,SO,Zi,qO,OO,ky,Fd,CO,Ay,w,IO,Ld,UO,W_,NO,Rd,zO,G_,FO,Wd,LO,M_,RO,Gd,WO,j_,GO,Md,MO,H_,jO,jd,HO,B_,BO,Hd,VO,V_,YO,Bd,ZO,Y_,KO,Vd,JO,Z_,XO,Yd,QO,K_,eC,Zd,tC,J_,oC,Kd,rC,X_,aC,Jd,nC,Q_,sC,Xd,iC,ev,lC,Qd,dC,tv,cC,ec,pC,ov,mC,tc,hC,rv,uC,oc,fC,av,gC,rc,_C,nv,vC,ac,bC,sv,yC,nc,wC,iv,TC,sc,EC,lv,$C,ic,xC,dv,kC,Py;return A=new Y({}),er=new gp({props:{warning:!0,$$slots:{default:[iM]},$$scope:{ctx:Z}}}),La=new O({props:{code:`from torch import nn from transformers import Trainer class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): labels = inputs.get("labels") # forward pass outputs = model(**inputs) logits = 
outputs.get("logits") # compute custom loss (suppose one has 3 labels with different weights) loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0])) loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) return (loss, outputs) if return_outputs else loss`,highlighted:`<span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Trainer <span class="hljs-keyword">class</span> <span class="hljs-title class_">CustomTrainer</span>(<span class="hljs-title class_ inherited__">Trainer</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_loss</span>(<span class="hljs-params">self, model, inputs, return_outputs=<span class="hljs-literal">False</span></span>): labels = inputs.get(<span class="hljs-string">&quot;labels&quot;</span>) <span class="hljs-comment"># forward pass</span> outputs = model(**inputs) logits = outputs.get(<span class="hljs-string">&quot;logits&quot;</span>) <span class="hljs-comment"># compute custom loss (suppose one has 3 labels with different weights)</span> loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([<span class="hljs-number">1.0</span>, <span class="hljs-number">2.0</span>, <span class="hljs-number">3.0</span>])) loss = loss_fct(logits.view(-<span class="hljs-number">1</span>, self.model.config.num_labels), labels.view(-<span class="hljs-number">1</span>)) <span class="hljs-keyword">return</span> (loss, outputs) <span class="hljs-keyword">if</span> return_outputs <span class="hljs-keyword">else</span> loss`}}),Ra=new Y({}),Wa=new x({props:{name:"class transformers.Trainer",anchor:"transformers.Trainer",parameters:[{name:"model",val:": typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None"},{name:"args",val:": TrainingArguments = None"},{name:"data_collator",val:": typing.Optional[DataCollator] = None"},{name:"train_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"model_init",val:": typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None"},{name:"compute_metrics",val:": typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None"},{name:"callbacks",val:": typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None"},{name:"optimizers",val:": typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)"},{name:"preprocess_logits_for_metrics",val:": typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None"}],parametersDescription:[{anchor:"transformers.Trainer.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>, <em>optional</em>) &#x2014; The model to train, evaluate or use for predictions. 
If not provided, a <code>model_init</code> must be passed.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> is optimized to work with the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> provided by the library. You can still use your own models defined as <code>torch.nn.Module</code> as long as they work the same way as the &#x1F917; Transformers models.</p> </div>`,name:"model"},{anchor:"transformers.Trainer.args",description:`<strong>args</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>, <em>optional</em>) &#x2014; The arguments to tweak for training. Will default to a basic instance of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> with the <code>output_dir</code> set to a directory named <em>tmp_trainer</em> in the current directory if not provided.`,name:"args"},{anchor:"transformers.Trainer.data_collator",description:`<strong>data_collator</strong> (<code>DataCollator</code>, <em>optional</em>) &#x2014; The function to use to form a batch from a list of elements of <code>train_dataset</code> or <code>eval_dataset</code>. Will default to <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.default_data_collator">default_data_collator()</a> if no <code>tokenizer</code> is provided, an instance of <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> otherwise.`,name:"data_collator"},{anchor:"transformers.Trainer.train_dataset",description:`<strong>train_dataset</strong> (<code>torch.utils.data.Dataset</code> or <code>torch.utils.data.IterableDataset</code>, <em>optional</em>) &#x2014; The dataset to use for training. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed.</p> <p>Note that if it&#x2019;s a <code>torch.utils.data.IterableDataset</code> with some randomization and you are training in a distributed fashion, your iterable dataset should either use a internal attribute <code>generator</code> that is a <code>torch.Generator</code> for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this <code>generator</code> at each epoch) or have a <code>set_epoch()</code> method that internally sets the seed of the RNGs used.`,name:"train_dataset"},{anchor:"transformers.Trainer.eval_dataset",description:`<strong>eval_dataset</strong> (Union[<code>torch.utils.data.Dataset</code>, Dict[str, <code>torch.utils.data.Dataset</code>]), <em>optional</em>) &#x2014; The dataset to use for evaluation. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
If it is a dictionary, it will evaluate on each dataset prepending the dictionary key to the metric name.`,name:"eval_dataset"},{anchor:"transformers.Trainer.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>, <em>optional</em>) &#x2014; The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model.`,name:"tokenizer"},{anchor:"transformers.Trainer.model_init",description:`<strong>model_init</strong> (<code>Callable[[], PreTrainedModel]</code>, <em>optional</em>) &#x2014; A function that instantiates the model to be used. If provided, each call to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> will start from a new instance of the model as given by this function.</p> <p>The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc).`,name:"model_init"},{anchor:"transformers.Trainer.compute_metrics",description:`<strong>compute_metrics</strong> (<code>Callable[[EvalPrediction], Dict]</code>, <em>optional</em>) &#x2014; The function that will be used to compute metrics at evaluation. Must take a <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.EvalPrediction">EvalPrediction</a> and return a dictionary string to metric values.`,name:"compute_metrics"},{anchor:"transformers.Trainer.callbacks",description:`<strong>callbacks</strong> (List of <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>, <em>optional</em>) &#x2014; A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in <a href="callback">here</a>.</p> <p>If you want to remove one of the default callbacks used, use the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.remove_callback">Trainer.remove_callback()</a> method.`,name:"callbacks"},{anchor:"transformers.Trainer.optimizers",description:`<strong>optimizers</strong> (<code>Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]</code>, <em>optional</em>) &#x2014; A tuple containing the optimizer and the scheduler to use. Will default to an instance of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> on your model and a scheduler given by <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.get_linear_schedule_with_warmup">get_linear_schedule_with_warmup()</a> controlled by <code>args</code>.`,name:"optimizers"},{anchor:"transformers.Trainer.preprocess_logits_for_metrics",description:`<strong>preprocess_logits_for_metrics</strong> (<code>Callable[[torch.Tensor, torch.Tensor], torch.Tensor]</code>, <em>optional</em>) &#x2014; A function that preprocess the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. 
The modifications made by this function will be reflected in the predictions received by <code>compute_metrics</code>.</p> <p>Note that the labels (second parameter) will be <code>None</code> if the dataset does not have them.`,name:"preprocess_logits_for_metrics"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L209"}}),Ga=new x({props:{name:"add_callback",anchor:"transformers.Trainer.add_callback",parameters:[{name:"callback",val:""}],parametersDescription:[{anchor:"transformers.Trainer.add_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>~transformer.TrainerCallback</code>) &#x2014; A <code>~transformer.TrainerCallback</code> class or an instance of a <code>~transformer.TrainerCallback</code>. In the first case, will instantiate a member of that class.`,name:"callback"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L664"}}),ja=new x({props:{name:"autocast_smart_context_manager",anchor:"transformers.Trainer.autocast_smart_context_manager",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2441"}}),Ba=new x({props:{name:"compute_loss",anchor:"transformers.Trainer.compute_loss",parameters:[{name:"model",val:""},{name:"inputs",val:""},{name:"return_outputs",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2508"}}),Va=new x({props:{name:"compute_loss_context_manager",anchor:"transformers.Trainer.compute_loss_context_manager",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2424"}}),Ya=new x({props:{name:"create_model_card",anchor:"transformers.Trainer.create_model_card",parameters:[{name:"language",val:": typing.Optional[str] = None"},{name:"license",val:": typing.Optional[str] = None"},{name:"tags",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"model_name",val:": typing.Optional[str] = None"},{name:"finetuned_from",val:": typing.Optional[str] = None"},{name:"tasks",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"dataset_tags",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"dataset",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"dataset_args",val:": typing.Union[str, typing.List[str], NoneType] = None"}],parametersDescription:[{anchor:"transformers.Trainer.create_model_card.language",description:`<strong>language</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the model (if applicable)`,name:"language"},{anchor:"transformers.Trainer.create_model_card.license",description:`<strong>license</strong> (<code>str</code>, <em>optional</em>) &#x2014; The license of the model. 
Will default to the license of the pretrained model used, if the original model given to the <code>Trainer</code> comes from a repo on the Hub.`,name:"license"},{anchor:"transformers.Trainer.create_model_card.tags",description:`<strong>tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Some tags to be included in the metadata of the model card.`,name:"tags"},{anchor:"transformers.Trainer.create_model_card.model_name",description:`<strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model.`,name:"model_name"},{anchor:"transformers.Trainer.create_model_card.finetuned_from",description:`<strong>finetuned_from</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the <code>Trainer</code> (if it comes from the Hub).`,name:"finetuned_from"},{anchor:"transformers.Trainer.create_model_card.tasks",description:`<strong>tasks</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several task identifiers, to be included in the metadata of the model card.`,name:"tasks"},{anchor:"transformers.Trainer.create_model_card.dataset_tags",description:`<strong>dataset_tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset tags, to be included in the metadata of the model card.`,name:"dataset_tags"},{anchor:"transformers.Trainer.create_model_card.dataset",description:`<strong>dataset</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset identifiers, to be included in the metadata of the model card.`,name:"dataset"},{anchor:"transformers.Trainer.create_model_card.dataset_args",description:`<strong>dataset_args</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset arguments, to be included in the metadata of the model card.`,name:"dataset_args"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3292"}}),Ka=new x({props:{name:"create_optimizer",anchor:"transformers.Trainer.create_optimizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1024"}}),Xa=new x({props:{name:"create_optimizer_and_scheduler",anchor:"transformers.Trainer.create_optimizer_and_scheduler",parameters:[{name:"num_training_steps",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1008"}}),Qa=new x({props:{name:"create_scheduler",anchor:"transformers.Trainer.create_scheduler",parameters:[{name:"num_training_steps",val:": int"},{name:"optimizer",val:": Optimizer = None"}],parametersDescription:[{anchor:"transformers.Trainer.create_scheduler.num_training_steps",description:"<strong>num_training_steps</strong> (int) &#x2014; The number of training steps to do.",name:"num_training_steps"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1132"}}),en=new x({props:{name:"evaluate",anchor:"transformers.Trainer.evaluate",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],parametersDescription:[{anchor:"transformers.Trainer.evaluate.eval_dataset",description:`<strong>eval_dataset</strong> 
(<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement the <code>__len__</code> method.`,name:"eval_dataset"},{anchor:"transformers.Trainer.evaluate.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Trainer.evaluate.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is &#x201C;eval&#x201D; (default)`,name:"metric_key_prefix"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2737",returnDescription:` <p>A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state.</p> `}}),on=new x({props:{name:"evaluation_loop",anchor:"transformers.Trainer.evaluation_loop",parameters:[{name:"dataloader",val:": DataLoader"},{name:"description",val:": str"},{name:"prediction_loss_only",val:": typing.Optional[bool] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2866"}}),rn=new x({props:{name:"floating_point_ops",anchor:"transformers.Trainer.floating_point_ops",parameters:[{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],parametersDescription:[{anchor:"transformers.Trainer.floating_point_ops.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.`,name:"inputs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3223",returnDescription:` <p>The number of floating-point operations.</p> `,returnType:` <p><code>int</code></p> `}}),nn=new x({props:{name:"get_eval_dataloader",anchor:"transformers.Trainer.get_eval_dataloader",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"}],parametersDescription:[{anchor:"transformers.Trainer.get_eval_dataloader.eval_dataset",description:`<strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; If provided, will override <code>self.eval_dataset</code>. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement <code>__len__</code>.`,name:"eval_dataset"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L910"}}),ln=new x({props:{name:"get_optimizer_cls_and_kwargs",anchor:"transformers.Trainer.get_optimizer_cls_and_kwargs",parameters:[{name:"args",val:": TrainingArguments"}],parametersDescription:[{anchor:"transformers.Trainer.get_optimizer_cls_and_kwargs.args",description:`<strong>args</strong> (<code>transformers.training_args.TrainingArguments</code>) &#x2014; The training arguments for the training session.`,name:"args"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1072"}}),dn=new x({props:{name:"get_test_dataloader",anchor:"transformers.Trainer.get_test_dataloader",parameters:[{name:"test_dataset",val:": Dataset"}],parametersDescription:[{anchor:"transformers.Trainer.get_test_dataloader.test_dataset",description:`<strong>test_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The test dataset to use. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement <code>__len__</code>.`,name:"test_dataset"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L960"}}),pn=new x({props:{name:"get_train_dataloader",anchor:"transformers.Trainer.get_train_dataloader",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L831"}}),hn=new x({props:{name:"hyperparameter_search",anchor:"transformers.Trainer.hyperparameter_search",parameters:[{name:"hp_space",val:": typing.Union[typing.Callable[[ForwardRef('optuna.Trial')], typing.Dict[str, float]], NoneType] = None"},{name:"compute_objective",val:": typing.Union[typing.Callable[[typing.Dict[str, float]], float], NoneType] = None"},{name:"n_trials",val:": int = 20"},{name:"direction",val:": str = 'minimize'"},{name:"backend",val:": typing.Union[ForwardRef('str'), transformers.trainer_utils.HPSearchBackend, NoneType] = None"},{name:"hp_name",val:": typing.Union[typing.Callable[[ForwardRef('optuna.Trial')], str], NoneType] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Trainer.hyperparameter_search.hp_space",description:`<strong>hp_space</strong> (<code>Callable[[&quot;optuna.Trial&quot;], Dict[str, float]]</code>, <em>optional</em>) &#x2014; A function that defines the hyperparameter search space. Will default to <code>default_hp_space_optuna()</code> or <code>default_hp_space_ray()</code> or <code>default_hp_space_sigopt()</code> depending on your backend.`,name:"hp_space"},{anchor:"transformers.Trainer.hyperparameter_search.compute_objective",description:`<strong>compute_objective</strong> (<code>Callable[[Dict[str, float]], float]</code>, <em>optional</em>) &#x2014; A function computing the objective to minimize or maximize from the metrics returned by the <code>evaluate</code> method. 
Will default to <code>default_compute_objective()</code>.`,name:"compute_objective"},{anchor:"transformers.Trainer.hyperparameter_search.n_trials",description:`<strong>n_trials</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; The number of trial runs to test.`,name:"n_trials"},{anchor:"transformers.Trainer.hyperparameter_search.direction",description:`<strong>direction</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;minimize&quot;</code>) &#x2014; Whether to optimize greater or lower objects. Can be <code>&quot;minimize&quot;</code> or <code>&quot;maximize&quot;</code>, you should pick <code>&quot;minimize&quot;</code> when optimizing the validation loss, <code>&quot;maximize&quot;</code> when optimizing one or several metrics.`,name:"direction"},{anchor:"transformers.Trainer.hyperparameter_search.backend",description:`<strong>backend</strong> (<code>str</code> or <code>~training_utils.HPSearchBackend</code>, <em>optional</em>) &#x2014; The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna.`,name:"backend"},{anchor:"transformers.Trainer.hyperparameter_search.hp_name",description:`<strong>hp_name</strong> (<code>Callable[[&quot;optuna.Trial&quot;], str]]</code>, <em>optional</em>) &#x2014; A function that defines the trial/run name. Will default to None.`,name:"hp_name"},{anchor:"transformers.Trainer.hyperparameter_search.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; Additional keyword arguments passed along to <code>optuna.create_study</code> or <code>ray.tune.run</code>. For more information see:</p> <ul> <li>the documentation of <a href="https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html" rel="nofollow">optuna.create_study</a></li> <li>the documentation of <a href="https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run" rel="nofollow">tune.run</a></li> <li>the documentation of <a href="https://app.sigopt.com/docs/endpoints/experiments/create" rel="nofollow">sigopt</a></li> </ul>`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2278",returnDescription:` <p>All the information about the best run.</p> `,returnType:` <p><code>trainer_utils.BestRun</code></p> `}}),pr=new gp({props:{warning:!0,$$slots:{default:[lM]},$$scope:{ctx:Z}}}),un=new x({props:{name:"init_git_repo",anchor:"transformers.Trainer.init_git_repo",parameters:[{name:"at_init",val:": bool = False"}],parametersDescription:[{anchor:"transformers.Trainer.init_git_repo.at_init",description:`<strong>at_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether this function is called before any training or not. 
If <code>self.args.overwrite_output_dir</code> is <code>True</code> and <code>at_init</code> is <code>True</code>, the path to the repo (which is <code>self.args.output_dir</code>) might be wiped out.`,name:"at_init"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3241"}}),gn=new x({props:{name:"is_local_process_zero",anchor:"transformers.Trainer.is_local_process_zero",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2540"}}),_n=new x({props:{name:"is_world_process_zero",anchor:"transformers.Trainer.is_world_process_zero",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2547"}}),bn=new x({props:{name:"log",anchor:"transformers.Trainer.log",parameters:[{name:"logs",val:": typing.Dict[str, float]"}],parametersDescription:[{anchor:"transformers.Trainer.log.logs",description:`<strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.`,name:"logs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2373"}}),wn=new x({props:{name:"log_metrics",anchor:"transformers.Trainer.log_metrics",parameters:[{name:"split",val:""},{name:"metrics",val:""}],parametersDescription:[{anchor:"transformers.Trainer.log_metrics.split",description:`<strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>`,name:"split"},{anchor:"transformers.Trainer.log_metrics.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predictmetrics: metrics dict`,name:"metrics"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L874"}}),fr=new sM({props:{anchor:"transformers.Trainer.log_metrics.example",$$slots:{default:[dM]},$$scope:{ctx:Z}}}),$n=new x({props:{name:"metrics_format",anchor:"transformers.Trainer.metrics_format",parameters:[{name:"metrics",val:": typing.Dict[str, float]"}],parametersDescription:[{anchor:"transformers.Trainer.metrics_format.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict`,name:"metrics"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L848",returnDescription:` <p>The reformatted metrics</p> `,returnType:` <p>metrics (<code>Dict[str, float]</code>)</p> `}}),xn=new x({props:{name:"num_examples",anchor:"transformers.Trainer.num_examples",parameters:[{name:"dataloader",val:": DataLoader"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1149"}}),An=new x({props:{name:"pop_callback",anchor:"transformers.Trainer.pop_callback",parameters:[{name:"callback",val:""}],parametersDescription:[{anchor:"transformers.Trainer.pop_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>~transformer.TrainerCallback</code>) &#x2014; A <code>~transformer.TrainerCallback</code> class or an instance of a <code>~transformer.TrainerCallback</code>. 
In the first case, will pop the first member of that class found in the list of callbacks.`,name:"callback"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L675",returnDescription:` <p>The callback removed, if found.</p> `,returnType:` <p><code>~transformer.TrainerCallback</code></p> `}}),Sn=new x({props:{name:"predict",anchor:"transformers.Trainer.predict",parameters:[{name:"test_dataset",val:": Dataset"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'test'"}],parametersDescription:[{anchor:"transformers.Trainer.predict.test_dataset",description:`<strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code>`,name:"test_dataset"},{anchor:"transformers.Trainer.predict.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Trainer.predict.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;test&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;test_bleu&#x201D; if the prefix is &#x201C;test&#x201D; (default)`,name:"metric_key_prefix"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2806"}}),vr=new gp({props:{$$slots:{default:[cM]},$$scope:{ctx:Z}}}),Cn=new x({props:{name:"prediction_loop",anchor:"transformers.Trainer.prediction_loop",parameters:[{name:"dataloader",val:": DataLoader"},{name:"description",val:": str"},{name:"prediction_loss_only",val:": typing.Optional[bool] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3449"}}),In=new x({props:{name:"prediction_step",anchor:"transformers.Trainer.prediction_step",parameters:[{name:"model",val:": Module"},{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"},{name:"prediction_loss_only",val:": bool"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"}],parametersDescription:[{anchor:"transformers.Trainer.prediction_step.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to evaluate.`,name:"model"},{anchor:"transformers.Trainer.prediction_step.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. 
Check your model&#x2019;s documentation for all accepted arguments.`,name:"inputs"},{anchor:"transformers.Trainer.prediction_step.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>) &#x2014; Whether or not to return the loss only.`,name:"prediction_loss_only"},{anchor:"transformers.Trainer.prediction_step.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3126",returnDescription:` <p>A tuple with the loss, logits and labels (each being optional).</p> `,returnType:` <p>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</p> `}}),Un=new x({props:{name:"push_to_hub",anchor:"transformers.Trainer.push_to_hub",parameters:[{name:"commit_message",val:": typing.Optional[str] = 'End of training'"},{name:"blocking",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Trainer.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;End of training&quot;</code>) &#x2014; Message to commit while pushing.`,name:"commit_message"},{anchor:"transformers.Trainer.push_to_hub.blocking",description:`<strong>blocking</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the function should return only when the <code>git push</code> has finished. kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.create_model_card">create_model_card()</a>.`,name:"blocking"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3390",returnDescription:` <p>The url of the commit of your model in the given repository if <code>blocking=False</code>, a tuple with the url of the commit and an object to track the progress of the commit if <code>blocking=True</code></p> `}}),Nn=new x({props:{name:"remove_callback",anchor:"transformers.Trainer.remove_callback",parameters:[{name:"callback",val:""}],parametersDescription:[{anchor:"transformers.Trainer.remove_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>~transformer.TrainerCallback</code>) &#x2014; A <code>~transformer.TrainerCallback</code> class or an instance of a <code>~transformer.TrainerCallback</code>. 
In the first case, will remove the first member of that class found in the list of callbacks.`,name:"callback"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L691"}}),Fn=new x({props:{name:"save_metrics",anchor:"transformers.Trainer.save_metrics",parameters:[{name:"split",val:""},{name:"metrics",val:""},{name:"combined",val:" = True"}],parametersDescription:[{anchor:"transformers.Trainer.save_metrics.split",description:`<strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>, <code>all</code>`,name:"split"},{anchor:"transformers.Trainer.save_metrics.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict`,name:"metrics"},{anchor:"transformers.Trainer.save_metrics.combined",description:`<strong>combined</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Creates combined metrics by updating <code>all_results.json</code> with metrics of this call`,name:"combined"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L964"}}),Wn=new x({props:{name:"save_model",anchor:"transformers.Trainer.save_model",parameters:[{name:"output_dir",val:": typing.Optional[str] = None"},{name:"_internal_call",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2559"}}),Mn=new x({props:{name:"save_state",anchor:"transformers.Trainer.save_state",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L1002"}}),jn=new x({props:{name:"torchdynamo_smart_context_manager",anchor:"transformers.Trainer.torchdynamo_smart_context_manager",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2435"}}),Bn=new x({props:{name:"train",anchor:"transformers.Trainer.train",parameters:[{name:"resume_from_checkpoint",val:": typing.Union[str, bool, NoneType] = None"},{name:"trial",val:": typing.Union[ForwardRef('optuna.Trial'), typing.Dict[str, typing.Any]] = None"},{name:"ignore_keys_for_eval",val:": typing.Optional[typing.List[str]] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Trainer.train.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code> or <code>bool</code>, <em>optional</em>) &#x2014; If a <code>str</code>, local path to a saved checkpoint as saved by a previous instance of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If a <code>bool</code> and equals <code>True</code>, load the last checkpoint in <em>args.output_dir</em> as saved by a previous instance of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>. 
If present, training will resume from the model/optimizer/scheduler states loaded here.`,name:"resume_from_checkpoint"},{anchor:"transformers.Trainer.train.trial",description:`<strong>trial</strong> (<code>optuna.Trial</code> or <code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The trial run or the hyperparameter dictionary for hyperparameter search.`,name:"trial"},{anchor:"transformers.Trainer.train.ignore_keys_for_eval",description:`<strong>ignore_keys_for_eval</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. kwargs &#x2014; Additional keyword arguments used to hide deprecated arguments`,name:"ignore_keys_for_eval"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1421"}}),Vn=new x({props:{name:"training_step",anchor:"transformers.Trainer.training_step",parameters:[{name:"model",val:": Module"},{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],parametersDescription:[{anchor:"transformers.Trainer.training_step.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to train.`,name:"model"},{anchor:"transformers.Trainer.training_step.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. Check your model&#x2019;s documentation for all accepted arguments.`,name:"inputs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2460",returnDescription:` <p>The tensor with training loss on this batch.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),Yn=new Y({}),Zn=new x({props:{name:"class transformers.Seq2SeqTrainer",anchor:"transformers.Seq2SeqTrainer",parameters:[{name:"model",val:": typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None"},{name:"args",val:": TrainingArguments = None"},{name:"data_collator",val:": typing.Optional[DataCollator] = None"},{name:"train_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"model_init",val:": typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None"},{name:"compute_metrics",val:": typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None"},{name:"callbacks",val:": typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None"},{name:"optimizers",val:": typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)"},{name:"preprocess_logits_for_metrics",val:": typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L30"}}),Kn=new x({props:{name:"evaluate",anchor:"transformers.Seq2SeqTrainer.evaluate",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 
'eval'"},{name:"**gen_kwargs",val:""}],parametersDescription:[{anchor:"transformers.Seq2SeqTrainer.evaluate.eval_dataset",description:`<strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement the <code>__len__</code> method.`,name:"eval_dataset"},{anchor:"transformers.Seq2SeqTrainer.evaluate.ignore_keys",description:`<strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Seq2SeqTrainer.evaluate.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)`,name:"metric_key_prefix"},{anchor:"transformers.Seq2SeqTrainer.evaluate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.`,name:"max_length"},{anchor:"transformers.Seq2SeqTrainer.evaluate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs &#x2014; Additional <code>generate</code> specific kwargs.`,name:"num_beams"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L31",returnDescription:` <p>A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state.</p> `}}),Xn=new x({props:{name:"predict",anchor:"transformers.Seq2SeqTrainer.predict",parameters:[{name:"test_dataset",val:": Dataset"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'test'"},{name:"**gen_kwargs",val:""}],parametersDescription:[{anchor:"transformers.Seq2SeqTrainer.predict.test_dataset",description:`<strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code>`,name:"test_dataset"},{anchor:"transformers.Seq2SeqTrainer.predict.ignore_keys",description:`<strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Seq2SeqTrainer.predict.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)`,name:"metric_key_prefix"},{anchor:"transformers.Seq2SeqTrainer.predict.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.`,name:"max_length"},{anchor:"transformers.Seq2SeqTrainer.predict.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. gen_kwargs &#x2014; Additional <code>generate</code> specific kwargs.`,name:"num_beams"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L80"}}),$r=new gp({props:{$$slots:{default:[pM]},$$scope:{ctx:Z}}}),ts=new Y({}),os=new x({props:{name:"class transformers.TrainingArguments",anchor:"transformers.TrainingArguments",parameters:[{name:"output_dir",val:": str"},{name:"overwrite_output_dir",val:": bool = False"},{name:"do_train",val:": bool = False"},{name:"do_eval",val:": bool = False"},{name:"do_predict",val:": bool = False"},{name:"evaluation_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'no'"},{name:"prediction_loss_only",val:": bool = False"},{name:"per_device_train_batch_size",val:": int = 8"},{name:"per_device_eval_batch_size",val:": int = 8"},{name:"per_gpu_train_batch_size",val:": typing.Optional[int] = None"},{name:"per_gpu_eval_batch_size",val:": typing.Optional[int] = None"},{name:"gradient_accumulation_steps",val:": int = 1"},{name:"eval_accumulation_steps",val:": typing.Optional[int] = None"},{name:"eval_delay",val:": typing.Optional[float] = 0"},{name:"learning_rate",val:": float = 5e-05"},{name:"weight_decay",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"max_grad_norm",val:": float = 1.0"},{name:"num_train_epochs",val:": float = 3.0"},{name:"max_steps",val:": int = -1"},{name:"lr_scheduler_type",val:": typing.Union[transformers.trainer_utils.SchedulerType, str] = 'linear'"},{name:"warmup_ratio",val:": float = 0.0"},{name:"warmup_steps",val:": int = 0"},{name:"log_level",val:": typing.Optional[str] = 'passive'"},{name:"log_level_replica",val:": typing.Optional[str] = 'passive'"},{name:"log_on_each_node",val:": bool = True"},{name:"logging_dir",val:": typing.Optional[str] = None"},{name:"logging_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'steps'"},{name:"logging_first_step",val:": bool = False"},{name:"logging_steps",val:": int = 500"},{name:"logging_nan_inf_filter",val:": bool = True"},{name:"save_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'steps'"},{name:"save_steps",val:": int = 500"},{name:"save_total_limit",val:": typing.Optional[int] = None"},{name:"save_on_each_node",val:": bool = False"},{name:"no_cuda",val:": bool = False"},{name:"use_mps_device",val:": bool = False"},{name:"seed",val:": int = 42"},{name:"data_seed",val:": typing.Optional[int] = None"},{name:"jit_mode_eval",val:": bool = False"},{name:"use_ipex",val:": bool = False"},{name:"bf16",val:": bool = False"},{name:"fp16",val:": bool = False"},{name:"fp16_opt_level",val:": str = 'O1'"},{name:"half_precision_backend",val:": str = 'auto'"},{name:"bf16_full_eval",val:": bool = 
False"},{name:"fp16_full_eval",val:": bool = False"},{name:"tf32",val:": typing.Optional[bool] = None"},{name:"local_rank",val:": int = -1"},{name:"xpu_backend",val:": typing.Optional[str] = None"},{name:"tpu_num_cores",val:": typing.Optional[int] = None"},{name:"tpu_metrics_debug",val:": bool = False"},{name:"debug",val:": str = ''"},{name:"dataloader_drop_last",val:": bool = False"},{name:"eval_steps",val:": typing.Optional[int] = None"},{name:"dataloader_num_workers",val:": int = 0"},{name:"past_index",val:": int = -1"},{name:"run_name",val:": typing.Optional[str] = None"},{name:"disable_tqdm",val:": typing.Optional[bool] = None"},{name:"remove_unused_columns",val:": typing.Optional[bool] = True"},{name:"label_names",val:": typing.Optional[typing.List[str]] = None"},{name:"load_best_model_at_end",val:": typing.Optional[bool] = False"},{name:"metric_for_best_model",val:": typing.Optional[str] = None"},{name:"greater_is_better",val:": typing.Optional[bool] = None"},{name:"ignore_data_skip",val:": bool = False"},{name:"sharded_ddp",val:": str = ''"},{name:"fsdp",val:": str = ''"},{name:"fsdp_min_num_params",val:": int = 0"},{name:"fsdp_transformer_layer_cls_to_wrap",val:": typing.Optional[str] = None"},{name:"deepspeed",val:": typing.Optional[str] = None"},{name:"label_smoothing_factor",val:": float = 0.0"},{name:"optim",val:": typing.Union[transformers.training_args.OptimizerNames, str] = 'adamw_hf'"},{name:"adafactor",val:": bool = False"},{name:"group_by_length",val:": bool = False"},{name:"length_column_name",val:": typing.Optional[str] = 'length'"},{name:"report_to",val:": typing.Optional[typing.List[str]] = None"},{name:"ddp_find_unused_parameters",val:": typing.Optional[bool] = None"},{name:"ddp_bucket_cap_mb",val:": typing.Optional[int] = None"},{name:"dataloader_pin_memory",val:": bool = True"},{name:"skip_memory_metrics",val:": bool = True"},{name:"use_legacy_prediction_loop",val:": bool = False"},{name:"push_to_hub",val:": bool = False"},{name:"resume_from_checkpoint",val:": typing.Optional[str] = None"},{name:"hub_model_id",val:": typing.Optional[str] = None"},{name:"hub_strategy",val:": typing.Union[transformers.trainer_utils.HubStrategy, str] = 'every_save'"},{name:"hub_token",val:": typing.Optional[str] = None"},{name:"hub_private_repo",val:": bool = False"},{name:"gradient_checkpointing",val:": bool = False"},{name:"include_inputs_for_metrics",val:": bool = False"},{name:"fp16_backend",val:": str = 'auto'"},{name:"push_to_hub_model_id",val:": typing.Optional[str] = None"},{name:"push_to_hub_organization",val:": typing.Optional[str] = None"},{name:"push_to_hub_token",val:": typing.Optional[str] = None"},{name:"mp_parameters",val:": str = ''"},{name:"auto_find_batch_size",val:": bool = False"},{name:"full_determinism",val:": bool = False"},{name:"torchdynamo",val:": typing.Optional[str] = None"},{name:"ray_scope",val:": typing.Optional[str] = 'last'"},{name:"ddp_timeout",val:": typing.Optional[int] = 1800"}],parametersDescription:[{anchor:"transformers.TrainingArguments.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.`,name:"output_dir"},{anchor:"transformers.TrainingArguments.overwrite_output_dir",description:`<strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. 
Use this to continue training if <code>output_dir</code> points to a checkpoint directory.`,name:"overwrite_output_dir"},{anchor:"transformers.TrainingArguments.do_train",description:`<strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_train"},{anchor:"transformers.TrainingArguments.do_eval",description:`<strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_eval"},{anchor:"transformers.TrainingArguments.do_predict",description:`<strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_predict"},{anchor:"transformers.TrainingArguments.evaluation_strategy",description:`<strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul>`,name:"evaluation_strategy"},{anchor:"transformers.TrainingArguments.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.`,name:"prediction_loss_only"},{anchor:"transformers.TrainingArguments.per_device_train_batch_size",description:`<strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.`,name:"per_device_train_batch_size"},{anchor:"transformers.TrainingArguments.per_device_eval_batch_size",description:`<strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.`,name:"per_device_eval_batch_size"},{anchor:"transformers.TrainingArguments.gradient_accumulation_steps",description:`<strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div>`,name:"gradient_accumulation_steps"},{anchor:"transformers.TrainingArguments.eval_accumulation_steps",description:`<strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).`,name:"eval_accumulation_steps"},{anchor:"transformers.TrainingArguments.eval_delay",description:`<strong>eval_delay</strong> (<code>float</code>, <em>optional</em>) &#x2014; Number of epochs or steps to wait for before the first evaluation can be performed, depending on the evaluation_strategy.`,name:"eval_delay"},{anchor:"transformers.TrainingArguments.learning_rate",description:`<strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"learning_rate"},{anchor:"transformers.TrainingArguments.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"weight_decay"},{anchor:"transformers.TrainingArguments.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta1"},{anchor:"transformers.TrainingArguments.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta2"},{anchor:"transformers.TrainingArguments.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_epsilon"},{anchor:"transformers.TrainingArguments.max_grad_norm",description:`<strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).`,name:"max_grad_norm"},{anchor:"transformers.TrainingArguments.num_train_epochs(float,",description:`<strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).`,name:"num_train_epochs(float,"},{anchor:"transformers.TrainingArguments.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted`,name:"max_steps"},{anchor:"transformers.TrainingArguments.lr_scheduler_type",description:`<strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. 
See the documentation of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.`,name:"lr_scheduler_type"},{anchor:"transformers.TrainingArguments.warmup_ratio",description:`<strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.`,name:"warmup_ratio"},{anchor:"transformers.TrainingArguments.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.`,name:"warmup_steps"},{anchor:"transformers.TrainingArguments.log_level",description:`<strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.`,name:"log_level"},{anchor:"transformers.TrainingArguments.log_level_replica",description:`<strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;`,name:"log_level_replica"},{anchor:"transformers.TrainingArguments.log_on_each_node",description:`<strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.`,name:"log_on_each_node"},{anchor:"transformers.TrainingArguments.logging_dir",description:`<strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.`,name:"logging_dir"},{anchor:"transformers.TrainingArguments.logging_strategy",description:`<strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul>`,name:"logging_strategy"},{anchor:"transformers.TrainingArguments.logging_first_step",description:`<strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.`,name:"logging_first_step"},{anchor:"transformers.TrainingArguments.logging_steps",description:`<strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.`,name:"logging_steps"},{anchor:"transformers.TrainingArguments.logging_nan_inf_filter",description:`<strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div>`,name:"logging_nan_inf_filter"},{anchor:"transformers.TrainingArguments.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul>`,name:"save_strategy"},{anchor:"transformers.TrainingArguments.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.`,name:"save_steps"},{anchor:"transformers.TrainingArguments.save_total_limit",description:`<strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.`,name:"save_total_limit"},{anchor:"transformers.TrainingArguments.save_on_each_node",description:`<strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.`,name:"save_on_each_node"},{anchor:"transformers.TrainingArguments.no_cuda",description:`<strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.`,name:"no_cuda"},{anchor:"transformers.TrainingArguments.seed",description:`<strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>~Trainer.model_init</code> function to instantiate the model if it has some randomly initialized parameters.`,name:"seed"},{anchor:"transformers.TrainingArguments.data_seed",description:`<strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. This can be used to ensure reproducibility of data sampling, independent of the model seed.`,name:"data_seed"},{anchor:"transformers.TrainingArguments.jit_mode_eval",description:`<strong>jit_mode_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use PyTorch jit trace for inference.`,name:"jit_mode_eval"},{anchor:"transformers.TrainingArguments.use_ipex",description:`<strong>use_ipex</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Intel extension for PyTorch when it is available. <a href="https://github.com/intel/intel-extension-for-pytorch" rel="nofollow">IPEX installation</a>.`,name:"use_ipex"},{anchor:"transformers.TrainingArguments.bf16",description:`<strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.`,name:"bf16"},{anchor:"transformers.TrainingArguments.fp16",description:`<strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.`,name:"fp16"},{anchor:"transformers.TrainingArguments.fp16_opt_level",description:`<strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.`,name:"fp16_opt_level"},{anchor:"transformers.TrainingArguments.fp16_backend",description:`<strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. 
Use <code>half_precision_backend</code> instead.`,name:"fp16_backend"},{anchor:"transformers.TrainingArguments.half_precision_backend",description:`<strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;, &quot;cuda_amp&quot;, &quot;apex&quot;, &quot;cpu_amp&quot;</code>. <code>&quot;auto&quot;</code> will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.`,name:"half_precision_backend"},{anchor:"transformers.TrainingArguments.bf16_full_eval",description:`<strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. This is an experimental API and it may change.`,name:"bf16_full_eval"},{anchor:"transformers.TrainingArguments.fp16_full_eval",description:`<strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.`,name:"fp16_full_eval"},{anchor:"transformers.TrainingArguments.tf32",description:`<strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch&#x2019;s version default of <code>torch.backends.cuda.matmul.allow_tf32</code>. For more details please refer to the <a href="https://huggingface.co/docs/transformers/performance#tf32" rel="nofollow">TF32</a> documentation. This is an experimental API and it may change.`,name:"tf32"},{anchor:"transformers.TrainingArguments.local_rank",description:`<strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.`,name:"local_rank"},{anchor:"transformers.TrainingArguments.xpu_backend",description:`<strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.`,name:"xpu_backend"},{anchor:"transformers.TrainingArguments.tpu_num_cores",description:`<strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).`,name:"tpu_num_cores"},{anchor:"transformers.TrainingArguments.dataloader_drop_last",description:`<strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.`,name:"dataloader_drop_last"},{anchor:"transformers.TrainingArguments.eval_steps",description:`<strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. Will default to the same value as <code>logging_steps</code> if not set.`,name:"eval_steps"},{anchor:"transformers.TrainingArguments.dataloader_num_workers",description:`<strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 
0 means that the data will be loaded in the main process.`,name:"dataloader_num_workers"},{anchor:"transformers.TrainingArguments.past_index",description:`<strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.`,name:"past_index"},{anchor:"transformers.TrainingArguments.run_name",description:`<strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.`,name:"run_name"},{anchor:"transformers.TrainingArguments.disable_tqdm",description:`<strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>~notebook.NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.`,name:"disable_tqdm"},{anchor:"transformers.TrainingArguments.remove_unused_columns",description:`<strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code> yet.)`,name:"remove_unused_columns"},{anchor:"transformers.TrainingArguments.label_names",description:`<strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.`,name:"label_names"},{anchor:"transformers.TrainingArguments.load_best_model_at_end",description:`<strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>evaluation_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div>`,name:"load_best_model_at_end"},{anchor:"transformers.TrainingArguments.metric_for_best_model",description:`<strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. 
Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.`,name:"metric_for_best_model"},{anchor:"transformers.TrainingArguments.greater_is_better",description:`<strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul>`,name:"greater_is_better"},{anchor:"transformers.TrainingArguments.ignore_data_skip",description:`<strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.`,name:"ignore_data_skip"},{anchor:"transformers.TrainingArguments.sharded_ddp",description:`<strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. 
If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.`,name:"sharded_ddp"},{anchor:"transformers.TrainingArguments.fsdp",description:`<strong>fsdp</strong> (<code>bool</code>, <code>str</code> or list of <code>FSDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use PyTorch Distributed Parallel Training (in distributed training only).</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;full_shard&quot;</code>: Shard parameters, gradients and optimizer states.</li> <li><code>&quot;shard_grad_op&quot;</code>: Shard optimizer states and gradients.</li> <li><code>&quot;offload&quot;</code>: Offload parameters and gradients to CPUs (only compatible with <code>&quot;full_shard&quot;</code> and <code>&quot;shard_grad_op&quot;</code>).</li> <li><code>&quot;auto_wrap&quot;</code>: Automatically recursively wrap layers with FSDP using <code>default_auto_wrap_policy</code>.</li> </ul>`,name:"fsdp"},{anchor:"transformers.TrainingArguments.fsdp_min_num_params",description:`<strong>fsdp_min_num_params</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; FSDP&#x2019;s minimum number of parameters for Default Auto Wrapping. (useful only when <code>fsdp</code> field is passed).`,name:"fsdp_min_num_params"},{anchor:"transformers.TrainingArguments.deepspeed",description:`<strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;`,name:"deepspeed"},{anchor:"transformers.TrainingArguments.label_smoothing_factor",description:`<strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.`,name:"label_smoothing_factor"},{anchor:"transformers.TrainingArguments.debug",description:`<strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.`,name:"debug"},{anchor:"transformers.TrainingArguments.optim",description:`<strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code>, <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.`,name:"optim"},{anchor:"transformers.TrainingArguments.adafactor",description:`<strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. 
Use <code>--optim adafactor</code> instead.`,name:"adafactor"},{anchor:"transformers.TrainingArguments.group_by_length",description:`<strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). Only useful if applying dynamic padding.`,name:"group_by_length"},{anchor:"transformers.TrainingArguments.length_column_name",description:`<strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.`,name:"length_column_name"},{anchor:"transformers.TrainingArguments.report_to",description:`<strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;neptune&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.`,name:"report_to"},{anchor:"transformers.TrainingArguments.ddp_find_unused_parameters",description:`<strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.`,name:"ddp_find_unused_parameters"},{anchor:"transformers.TrainingArguments.ddp_bucket_cap_mb",description:`<strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.`,name:"ddp_bucket_cap_mb"},{anchor:"transformers.TrainingArguments.dataloader_pin_memory",description:`<strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.`,name:"dataloader_pin_memory"},{anchor:"transformers.TrainingArguments.skip_memory_metrics",description:`<strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.`,name:"skip_memory_metrics"},{anchor:"transformers.TrainingArguments.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. If this is activated, <code>output_dir</code> will begin a git directory synced with the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). 
Calling <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div>`,name:"push_to_hub"},{anchor:"transformers.TrainingArguments.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"resume_from_checkpoint"},{anchor:"transformers.TrainingArguments.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.TrainingArguments.hub_strategy",description:`<strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code>, <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. 
A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul>`,name:"hub_strategy"},{anchor:"transformers.TrainingArguments.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.TrainingArguments.hub_private_repo",description:`<strong>hub_private_repo</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, the Hub repo will be set to private.`,name:"hub_private_repo"},{anchor:"transformers.TrainingArguments.gradient_checkpointing",description:`<strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.`,name:"gradient_checkpointing"},{anchor:"transformers.TrainingArguments.include_inputs_for_metrics",description:`<strong>include_inputs_for_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the inputs will be passed to the <code>compute_metrics</code> function. This is intended for metrics that need inputs, predictions and references for scoring calculation in Metric class.`,name:"include_inputs_for_metrics"},{anchor:"transformers.TrainingArguments.auto_find_batch_size",description:`<strong>auto_find_batch_size</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (<code>pip install accelerate</code>)`,name:"auto_find_batch_size"},{anchor:"transformers.TrainingArguments.full_determinism",description:`<strong>full_determinism</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.enable_full_determinism">enable_full_determinism()</a> is called instead of <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.set_seed">set_seed()</a> to ensure reproducible results in distributed training`,name:"full_determinism"},{anchor:"transformers.TrainingArguments.torchdynamo",description:`<strong>torchdynamo</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token that is used to set the backend compiler for TorchDynamo. Possible choices are [&#x201C;eager&#x201D;, &#x201C;nvfuser]. This is an experimental API and subject to change.`,name:"torchdynamo"},{anchor:"transformers.TrainingArguments.ray_scope",description:`<strong>ray_scope</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;last&quot;</code>) &#x2014; The scope to use when doing hyperparameter search with Ray. 
By default, <code>&quot;last&quot;</code> will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the <a href="https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial" rel="nofollow">Ray documentation</a> for more options.`,name:"ray_scope"},{anchor:"transformers.TrainingArguments.ddp_timeout",description:`<strong>ddp_timeout</strong> (<code>int</code>, <em>optional</em>, defaults to 1800) &#x2014; The timeout for <code>torch.distributed.init_process_group</code> calls, used to avoid GPU socket timeouts when performing slow operations in distributed runnings. Please refer the [PyTorch documentation] (<a href="https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group" rel="nofollow">https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group</a>) for more information.`,name:"ddp_timeout"},{anchor:"transformers.TrainingArguments.use_mps_device",description:`<strong>use_mps_device</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use Apple Silicon chip based <code>mps</code> device.`,name:"use_mps_device"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L121"}}),ns=new x({props:{name:"get_process_log_level",anchor:"transformers.TrainingArguments.get_process_log_level",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1590"}}),is=new x({props:{name:"get_warmup_steps",anchor:"transformers.TrainingArguments.get_warmup_steps",parameters:[{name:"num_training_steps",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1680"}}),ls=new x({props:{name:"main_process_first",anchor:"transformers.TrainingArguments.main_process_first",parameters:[{name:"local",val:" = True"},{name:"desc",val:" = 'work'"}],parametersDescription:[{anchor:"transformers.TrainingArguments.main_process_first.local",description:`<strong>local</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; if <code>True</code> first means process of rank 0 of each node if <code>False</code> first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use <code>local=False</code> so that only the main process of the first node will do the processing. 
If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior.`,name:"local"},{anchor:"transformers.TrainingArguments.main_process_first.desc",description:`<strong>desc</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;work&quot;</code>) &#x2014; a work description to be used in debug logs`,name:"desc"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1625"}}),ds=new x({props:{name:"to_dict",anchor:"transformers.TrainingArguments.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1689"}}),ps=new x({props:{name:"to_json_string",anchor:"transformers.TrainingArguments.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1706"}}),ms=new x({props:{name:"to_sanitized_dict",anchor:"transformers.TrainingArguments.to_sanitized_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1712"}}),hs=new Y({}),us=new x({props:{name:"class transformers.Seq2SeqTrainingArguments",anchor:"transformers.Seq2SeqTrainingArguments",parameters:[{name:"output_dir",val:": str"},{name:"overwrite_output_dir",val:": bool = False"},{name:"do_train",val:": bool = False"},{name:"do_eval",val:": bool = False"},{name:"do_predict",val:": bool = False"},{name:"evaluation_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'no'"},{name:"prediction_loss_only",val:": bool = False"},{name:"per_device_train_batch_size",val:": int = 8"},{name:"per_device_eval_batch_size",val:": int = 8"},{name:"per_gpu_train_batch_size",val:": typing.Optional[int] = None"},{name:"per_gpu_eval_batch_size",val:": typing.Optional[int] = None"},{name:"gradient_accumulation_steps",val:": int = 1"},{name:"eval_accumulation_steps",val:": typing.Optional[int] = None"},{name:"eval_delay",val:": typing.Optional[float] = 0"},{name:"learning_rate",val:": float = 5e-05"},{name:"weight_decay",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"max_grad_norm",val:": float = 1.0"},{name:"num_train_epochs",val:": float = 3.0"},{name:"max_steps",val:": int = -1"},{name:"lr_scheduler_type",val:": typing.Union[transformers.trainer_utils.SchedulerType, str] = 'linear'"},{name:"warmup_ratio",val:": float = 0.0"},{name:"warmup_steps",val:": int = 0"},{name:"log_level",val:": typing.Optional[str] = 'passive'"},{name:"log_level_replica",val:": typing.Optional[str] = 'passive'"},{name:"log_on_each_node",val:": bool = True"},{name:"logging_dir",val:": typing.Optional[str] = None"},{name:"logging_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'steps'"},{name:"logging_first_step",val:": bool = False"},{name:"logging_steps",val:": int = 500"},{name:"logging_nan_inf_filter",val:": bool = True"},{name:"save_strategy",val:": typing.Union[transformers.trainer_utils.IntervalStrategy, str] = 'steps'"},{name:"save_steps",val:": int = 500"},{name:"save_total_limit",val:": typing.Optional[int] = None"},{name:"save_on_each_node",val:": bool = False"},{name:"no_cuda",val:": bool = False"},{name:"use_mps_device",val:": bool = False"},{name:"seed",val:": int = 42"},{name:"data_seed",val:": typing.Optional[int] = None"},{name:"jit_mode_eval",val:": bool = 
False"},{name:"use_ipex",val:": bool = False"},{name:"bf16",val:": bool = False"},{name:"fp16",val:": bool = False"},{name:"fp16_opt_level",val:": str = 'O1'"},{name:"half_precision_backend",val:": str = 'auto'"},{name:"bf16_full_eval",val:": bool = False"},{name:"fp16_full_eval",val:": bool = False"},{name:"tf32",val:": typing.Optional[bool] = None"},{name:"local_rank",val:": int = -1"},{name:"xpu_backend",val:": typing.Optional[str] = None"},{name:"tpu_num_cores",val:": typing.Optional[int] = None"},{name:"tpu_metrics_debug",val:": bool = False"},{name:"debug",val:": str = ''"},{name:"dataloader_drop_last",val:": bool = False"},{name:"eval_steps",val:": typing.Optional[int] = None"},{name:"dataloader_num_workers",val:": int = 0"},{name:"past_index",val:": int = -1"},{name:"run_name",val:": typing.Optional[str] = None"},{name:"disable_tqdm",val:": typing.Optional[bool] = None"},{name:"remove_unused_columns",val:": typing.Optional[bool] = True"},{name:"label_names",val:": typing.Optional[typing.List[str]] = None"},{name:"load_best_model_at_end",val:": typing.Optional[bool] = False"},{name:"metric_for_best_model",val:": typing.Optional[str] = None"},{name:"greater_is_better",val:": typing.Optional[bool] = None"},{name:"ignore_data_skip",val:": bool = False"},{name:"sharded_ddp",val:": str = ''"},{name:"fsdp",val:": str = ''"},{name:"fsdp_min_num_params",val:": int = 0"},{name:"fsdp_transformer_layer_cls_to_wrap",val:": typing.Optional[str] = None"},{name:"deepspeed",val:": typing.Optional[str] = None"},{name:"label_smoothing_factor",val:": float = 0.0"},{name:"optim",val:": typing.Union[transformers.training_args.OptimizerNames, str] = 'adamw_hf'"},{name:"adafactor",val:": bool = False"},{name:"group_by_length",val:": bool = False"},{name:"length_column_name",val:": typing.Optional[str] = 'length'"},{name:"report_to",val:": typing.Optional[typing.List[str]] = None"},{name:"ddp_find_unused_parameters",val:": typing.Optional[bool] = None"},{name:"ddp_bucket_cap_mb",val:": typing.Optional[int] = None"},{name:"dataloader_pin_memory",val:": bool = True"},{name:"skip_memory_metrics",val:": bool = True"},{name:"use_legacy_prediction_loop",val:": bool = False"},{name:"push_to_hub",val:": bool = False"},{name:"resume_from_checkpoint",val:": typing.Optional[str] = None"},{name:"hub_model_id",val:": typing.Optional[str] = None"},{name:"hub_strategy",val:": typing.Union[transformers.trainer_utils.HubStrategy, str] = 'every_save'"},{name:"hub_token",val:": typing.Optional[str] = None"},{name:"hub_private_repo",val:": bool = False"},{name:"gradient_checkpointing",val:": bool = False"},{name:"include_inputs_for_metrics",val:": bool = False"},{name:"fp16_backend",val:": str = 'auto'"},{name:"push_to_hub_model_id",val:": typing.Optional[str] = None"},{name:"push_to_hub_organization",val:": typing.Optional[str] = None"},{name:"push_to_hub_token",val:": typing.Optional[str] = None"},{name:"mp_parameters",val:": str = ''"},{name:"auto_find_batch_size",val:": bool = False"},{name:"full_determinism",val:": bool = False"},{name:"torchdynamo",val:": typing.Optional[str] = None"},{name:"ray_scope",val:": typing.Optional[str] = 'last'"},{name:"ddp_timeout",val:": typing.Optional[int] = 1800"},{name:"sortish_sampler",val:": bool = False"},{name:"predict_with_generate",val:": bool = False"},{name:"generation_max_length",val:": typing.Optional[int] = None"},{name:"generation_num_beams",val:": typing.Optional[int] = 
None"}],parametersDescription:[{anchor:"transformers.Seq2SeqTrainingArguments.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.`,name:"output_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.overwrite_output_dir",description:`<strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. Use this to continue training if <code>output_dir</code> points to a checkpoint directory.`,name:"overwrite_output_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.do_train",description:`<strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_train"},{anchor:"transformers.Seq2SeqTrainingArguments.do_eval",description:`<strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.do_predict",description:`<strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_predict"},{anchor:"transformers.Seq2SeqTrainingArguments.evaluation_strategy",description:`<strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul>`,name:"evaluation_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.`,name:"prediction_loss_only"},{anchor:"transformers.Seq2SeqTrainingArguments.per_device_train_batch_size",description:`<strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.`,name:"per_device_train_batch_size"},{anchor:"transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size",description:`<strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.`,name:"per_device_eval_batch_size"},{anchor:"transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps",description:`<strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div>`,name:"gradient_accumulation_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.eval_accumulation_steps",description:`<strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).`,name:"eval_accumulation_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.eval_delay",description:`<strong>eval_delay</strong> (<code>float</code>, <em>optional</em>) &#x2014; Number of epochs or steps to wait for before the first evaluation can be performed, depending on the evaluation_strategy.`,name:"eval_delay"},{anchor:"transformers.Seq2SeqTrainingArguments.learning_rate",description:`<strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"learning_rate"},{anchor:"transformers.Seq2SeqTrainingArguments.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"weight_decay"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta1"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta2"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_epsilon"},{anchor:"transformers.Seq2SeqTrainingArguments.max_grad_norm",description:`<strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).`,name:"max_grad_norm"},{anchor:"transformers.Seq2SeqTrainingArguments.num_train_epochs(float,",description:`<strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).`,name:"num_train_epochs(float,"},{anchor:"transformers.Seq2SeqTrainingArguments.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted`,name:"max_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.lr_scheduler_type",description:`<strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. See the documentation of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.`,name:"lr_scheduler_type"},{anchor:"transformers.Seq2SeqTrainingArguments.warmup_ratio",description:`<strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.`,name:"warmup_ratio"},{anchor:"transformers.Seq2SeqTrainingArguments.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.`,name:"warmup_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.log_level",description:`<strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.`,name:"log_level"},{anchor:"transformers.Seq2SeqTrainingArguments.log_level_replica",description:`<strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;`,name:"log_level_replica"},{anchor:"transformers.Seq2SeqTrainingArguments.log_on_each_node",description:`<strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.`,name:"log_on_each_node"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_dir",description:`<strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.`,name:"logging_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_strategy",description:`<strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul>`,name:"logging_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_first_step",description:`<strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.`,name:"logging_first_step"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_steps",description:`<strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.`,name:"logging_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter",description:`<strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div>`,name:"logging_nan_inf_filter"},{anchor:"transformers.Seq2SeqTrainingArguments.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul>`,name:"save_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.`,name:"save_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.save_total_limit",description:`<strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.`,name:"save_total_limit"},{anchor:"transformers.Seq2SeqTrainingArguments.save_on_each_node",description:`<strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.`,name:"save_on_each_node"},{anchor:"transformers.Seq2SeqTrainingArguments.no_cuda",description:`<strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.`,name:"no_cuda"},{anchor:"transformers.Seq2SeqTrainingArguments.seed",description:`<strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>~Trainer.model_init</code> function to instantiate the model if it has some randomly initialized parameters.`,name:"seed"},{anchor:"transformers.Seq2SeqTrainingArguments.data_seed",description:`<strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. This can be used to ensure reproducibility of data sampling, independent of the model seed.`,name:"data_seed"},{anchor:"transformers.Seq2SeqTrainingArguments.jit_mode_eval",description:`<strong>jit_mode_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use PyTorch jit trace for inference.`,name:"jit_mode_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.use_ipex",description:`<strong>use_ipex</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Intel extension for PyTorch when it is available. <a href="https://github.com/intel/intel-extension-for-pytorch" rel="nofollow">IPEX installation</a>.`,name:"use_ipex"},{anchor:"transformers.Seq2SeqTrainingArguments.bf16",description:`<strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.`,name:"bf16"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16",description:`<strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.`,name:"fp16"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_opt_level",description:`<strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.`,name:"fp16_opt_level"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_backend",description:`<strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. 
Use <code>half_precision_backend</code> instead.`,name:"fp16_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.half_precision_backend",description:`<strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;, &quot;cuda_amp&quot;, &quot;apex&quot;, &quot;cpu_amp&quot;</code>. <code>&quot;auto&quot;</code> will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.`,name:"half_precision_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.bf16_full_eval",description:`<strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. This is an experimental API and it may change.`,name:"bf16_full_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_full_eval",description:`<strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.`,name:"fp16_full_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.tf32",description:`<strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch&#x2019;s version default of <code>torch.backends.cuda.matmul.allow_tf32</code>. For more details please refer to the <a href="https://huggingface.co/docs/transformers/performance#tf32" rel="nofollow">TF32</a> documentation. This is an experimental API and it may change.`,name:"tf32"},{anchor:"transformers.Seq2SeqTrainingArguments.local_rank",description:`<strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.`,name:"local_rank"},{anchor:"transformers.Seq2SeqTrainingArguments.xpu_backend",description:`<strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.`,name:"xpu_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.tpu_num_cores",description:`<strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).`,name:"tpu_num_cores"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_drop_last",description:`<strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.`,name:"dataloader_drop_last"},{anchor:"transformers.Seq2SeqTrainingArguments.eval_steps",description:`<strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. 
Will default to the same value as <code>logging_steps</code> if not set.`,name:"eval_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_num_workers",description:`<strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.`,name:"dataloader_num_workers"},{anchor:"transformers.Seq2SeqTrainingArguments.past_index",description:`<strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.`,name:"past_index"},{anchor:"transformers.Seq2SeqTrainingArguments.run_name",description:`<strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.`,name:"run_name"},{anchor:"transformers.Seq2SeqTrainingArguments.disable_tqdm",description:`<strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>~notebook.NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.`,name:"disable_tqdm"},{anchor:"transformers.Seq2SeqTrainingArguments.remove_unused_columns",description:`<strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code> yet.)`,name:"remove_unused_columns"},{anchor:"transformers.Seq2SeqTrainingArguments.label_names",description:`<strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.`,name:"label_names"},{anchor:"transformers.Seq2SeqTrainingArguments.load_best_model_at_end",description:`<strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>evaluation_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> 
</div>`,name:"load_best_model_at_end"},{anchor:"transformers.Seq2SeqTrainingArguments.metric_for_best_model",description:`<strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.`,name:"metric_for_best_model"},{anchor:"transformers.Seq2SeqTrainingArguments.greater_is_better",description:`<strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul>`,name:"greater_is_better"},{anchor:"transformers.Seq2SeqTrainingArguments.ignore_data_skip",description:`<strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.`,name:"ignore_data_skip"},{anchor:"transformers.Seq2SeqTrainingArguments.sharded_ddp",description:`<strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DDP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DDP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. 
If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.`,name:"sharded_ddp"},{anchor:"transformers.Seq2SeqTrainingArguments.fsdp",description:`<strong>fsdp</strong> (<code>bool</code>, <code>str</code> or list of <code>FSDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use PyTorch Distributed Parallel Training (in distributed training only).</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;full_shard&quot;</code>: Shard parameters, gradients and optimizer states.</li> <li><code>&quot;shard_grad_op&quot;</code>: Shard optimizer states and gradients.</li> <li><code>&quot;offload&quot;</code>: Offload parameters and gradients to CPUs (only compatible with <code>&quot;full_shard&quot;</code> and <code>&quot;shard_grad_op&quot;</code>).</li> <li><code>&quot;auto_wrap&quot;</code>: Automatically recursively wrap layers with FSDP using <code>default_auto_wrap_policy</code>.</li> </ul>`,name:"fsdp"},{anchor:"transformers.Seq2SeqTrainingArguments.fsdp_min_num_params",description:`<strong>fsdp_min_num_params</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; FSDP&#x2019;s minimum number of parameters for Default Auto Wrapping. (useful only when <code>fsdp</code> field is passed).`,name:"fsdp_min_num_params"},{anchor:"transformers.Seq2SeqTrainingArguments.deepspeed",description:`<strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;`,name:"deepspeed"},{anchor:"transformers.Seq2SeqTrainingArguments.label_smoothing_factor",description:`<strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.`,name:"label_smoothing_factor"},{anchor:"transformers.Seq2SeqTrainingArguments.debug",description:`<strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.`,name:"debug"},{anchor:"transformers.Seq2SeqTrainingArguments.optim",description:`<strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code>, <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.`,name:"optim"},{anchor:"transformers.Seq2SeqTrainingArguments.adafactor",description:`<strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. 
Use <code>--optim adafactor</code> instead.`,name:"adafactor"},{anchor:"transformers.Seq2SeqTrainingArguments.group_by_length",description:`<strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). Only useful if applying dynamic padding.`,name:"group_by_length"},{anchor:"transformers.Seq2SeqTrainingArguments.length_column_name",description:`<strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.`,name:"length_column_name"},{anchor:"transformers.Seq2SeqTrainingArguments.report_to",description:`<strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;neptune&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.`,name:"report_to"},{anchor:"transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters",description:`<strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.`,name:"ddp_find_unused_parameters"},{anchor:"transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb",description:`<strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.`,name:"ddp_bucket_cap_mb"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_pin_memory",description:`<strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.`,name:"dataloader_pin_memory"},{anchor:"transformers.Seq2SeqTrainingArguments.skip_memory_metrics",description:`<strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.`,name:"skip_memory_metrics"},{anchor:"transformers.Seq2SeqTrainingArguments.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. 
If this is activated, <code>output_dir</code> will begin a git directory synced with the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). Calling <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div>`,name:"push_to_hub"},{anchor:"transformers.Seq2SeqTrainingArguments.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.`,name:"resume_from_checkpoint"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_strategy",description:`<strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code>, <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. 
A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul>`,name:"hub_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_private_repo",description:`<strong>hub_private_repo</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, the Hub repo will be set to private.`,name:"hub_private_repo"},{anchor:"transformers.Seq2SeqTrainingArguments.gradient_checkpointing",description:`<strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.`,name:"gradient_checkpointing"},{anchor:"transformers.Seq2SeqTrainingArguments.include_inputs_for_metrics",description:`<strong>include_inputs_for_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the inputs will be passed to the <code>compute_metrics</code> function. This is intended for metrics that need inputs, predictions and references for scoring calculation in Metric class.`,name:"include_inputs_for_metrics"},{anchor:"transformers.Seq2SeqTrainingArguments.auto_find_batch_size",description:`<strong>auto_find_batch_size</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. Requires accelerate to be installed (<code>pip install accelerate</code>)`,name:"auto_find_batch_size"},{anchor:"transformers.Seq2SeqTrainingArguments.full_determinism",description:`<strong>full_determinism</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.enable_full_determinism">enable_full_determinism()</a> is called instead of <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.set_seed">set_seed()</a> to ensure reproducible results in distributed training`,name:"full_determinism"},{anchor:"transformers.Seq2SeqTrainingArguments.torchdynamo",description:`<strong>torchdynamo</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token that is used to set the backend compiler for TorchDynamo. Possible choices are [&#x201C;eager&#x201D;, &#x201C;nvfuser]. 
This is an experimental API and subject to change.`,name:"torchdynamo"},{anchor:"transformers.Seq2SeqTrainingArguments.ray_scope",description:`<strong>ray_scope</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;last&quot;</code>) &#x2014; The scope to use when doing hyperparameter search with Ray. By default, <code>&quot;last&quot;</code> will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the <a href="https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial" rel="nofollow">Ray documentation</a> for more options.`,name:"ray_scope"},{anchor:"transformers.Seq2SeqTrainingArguments.ddp_timeout",description:`<strong>ddp_timeout</strong> (<code>int</code>, <em>optional</em>, defaults to 1800) &#x2014; The timeout for <code>torch.distributed.init_process_group</code> calls, used to avoid GPU socket timeouts when performing slow operations in distributed runs. Please refer to the <a href="https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group" rel="nofollow">PyTorch documentation</a> for more information.`,name:"ddp_timeout"},{anchor:"transformers.Seq2SeqTrainingArguments.use_mps_device",description:`<strong>use_mps_device</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use Apple Silicon chip based <code>mps</code> device.`,name:"use_mps_device"},{anchor:"transformers.Seq2SeqTrainingArguments.sortish_sampler",description:`<strong>sortish_sampler</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a <em>sortish sampler</em> or not. Only possible if the underlying datasets are <em>Seq2SeqDataset</em> for now but will become generally available in the near future.</p> <p>It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set.`,name:"sortish_sampler"},{anchor:"transformers.Seq2SeqTrainingArguments.predict_with_generate",description:`<strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use generate to calculate generative metrics (ROUGE, BLEU).`,name:"predict_with_generate"},{anchor:"transformers.Seq2SeqTrainingArguments.generation_max_length",description:`<strong>generation_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The <code>max_length</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. Will default to the <code>max_length</code> value of the model configuration.`,name:"generation_max_length"},{anchor:"transformers.Seq2SeqTrainingArguments.generation_num_beams",description:`<strong>generation_num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; The <code>num_beams</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. Will default to the <code>num_beams</code> value of the model configuration.`,name:"generation_num_beams"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args_seq2seq.py#L28"}}),_s=new Y({}),vs=new Y({}),bs=new O({props:{code:`[...] 
logger = logging.getLogger(__name__) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # set the main code and the modules it uses to the same log-level according to the node log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)`,highlighted:`[...] logger = logging.getLogger(__name__) <span class="hljs-comment"># Setup logging</span> logging.basicConfig( <span class="hljs-built_in">format</span>=<span class="hljs-string">&quot;%(asctime)s - %(levelname)s - %(name)s - %(message)s&quot;</span>, datefmt=<span class="hljs-string">&quot;%m/%d/%Y %H:%M:%S&quot;</span>, handlers=[logging.StreamHandler(sys.stdout)], ) <span class="hljs-comment"># set the main code and the modules it uses to the same log-level according to the node</span> log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)`}}),ys=new O({props:{code:"my_app.py ... --log_level warning --log_level_replica error",highlighted:"my_app.py ... --log_level warning --log_level_replica error"}}),ws=new O({props:{code:"my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0",highlighted:"my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0"}}),Ts=new O({props:{code:"my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0",highlighted:"my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0"}}),Es=new Y({}),xs=new Y({}),As=new O({props:{code:"python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ...",highlighted:"python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ..."}}),Ss=new O({props:{code:"accelerate launch --num_processes 2 trainer-program.py ...",highlighted:"accelerate launch --num_processes 2 trainer-program.py ..."}}),qs=new O({props:{code:"deepspeed --num_gpus 2 trainer-program.py ...",highlighted:"deepspeed --num_gpus 2 trainer-program.py ..."}}),Os=new O({props:{code:"CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ..."}}),Cs=new O({props:{code:"CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ..."}}),Us=new O({props:{code:"CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ..."}}),Ns=new O({props:{code:"CUDA_VISIBLE_DEVICES= python trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES= python trainer-program.py ..."}}),zs=new O({props:{code:`export CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...`,highlighted:`<span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...`}}),Ls=new O({props:{code:"export CUDA_DEVICE_ORDER=PCI_BUS_ID",highlighted:'<span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=PCI_BUS_ID'}}),Ws=new O({props:{code:"export CUDA_DEVICE_ORDER=FASTEST_FIRST",highlighted:'<span class="hljs-built_in">export</span> 
CUDA_DEVICE_ORDER=FASTEST_FIRST'}}),Gs=new O({props:{code:"export CUDA_VISIBLE_DEVICES=1,0",highlighted:'<span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=1,0'}}),Ms=new Y({}),Ys=new Y({}),Js=new O({props:{code:`pip install fairscale pip install deepspeed`,highlighted:`pip install fairscale pip install deepspeed`}}),Xs=new Y({}),Qs=new O({props:{code:"which nvcc",highlighted:'<span class="hljs-built_in">which</span> nvcc'}}),ti=new Y({}),oi=new O({props:{code:`/usr/local/cuda-10.2 /usr/local/cuda-11.0`,highlighted:`/usr/local/cuda-10.2 /usr/local/cuda-11.0`}}),ri=new O({props:{code:`echo $PATH echo $LD_LIBRARY_PATH`,highlighted:`<span class="hljs-built_in">echo</span> <span class="hljs-variable">$PATH</span> <span class="hljs-built_in">echo</span> <span class="hljs-variable">$LD_LIBRARY_PATH</span>`}}),ai=new O({props:{code:`export PATH=/usr/local/cuda-10.2/bin:$PATH export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH`,highlighted:`<span class="hljs-built_in">export</span> PATH=/usr/local/cuda-10.2/bin:<span class="hljs-variable">$PATH</span> <span class="hljs-built_in">export</span> LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:<span class="hljs-variable">$LD_LIBRARY_PATH</span>`}}),ni=new Y({}),si=new O({props:{code:`sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++`,highlighted:`sudo <span class="hljs-built_in">ln</span> -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo <span class="hljs-built_in">ln</span> -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++`}}),ii=new Y({}),ra=new gp({props:{warning:!0,$$slots:{default:[mM]},$$scope:{ctx:Z}}}),pi=new O({props:{code:"pip install fairscale",highlighted:"pip install fairscale"}}),mi=new O({props:{code:"pip install transformers[fairscale]",highlighted:"pip install transformers[fairscale]"}}),fi=new O({props:{code:"pip install fairscale --no-build-isolation .",highlighted:"pip install fairscale --no-build-isolation ."}}),gi=new O({props:{code:`git clone https://github.com/facebookresearch/fairscale/ cd fairscale rm -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/facebookresearch/fairscale/ <span class="hljs-built_in">cd</span> fairscale <span class="hljs-built_in">rm</span> -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl`}}),vi=new O({props:{code:`pip uninstall -y fairscale; pip install fairscale --pre \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \\ --no-cache --no-build-isolation`,highlighted:`pip uninstall -y fairscale; pip install fairscale --pre \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \\ --no-cache --no-build-isolation`}}),bi=new O({props:{code:`pip install -v --disable-pip-version-check . \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre`,highlighted:`pip install -v --disable-pip-version-check . 
\\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre`}}),Ti=new O({props:{code:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp simple`,highlighted:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp simple`}}),ki=new O({props:{code:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp zero_dp_2`,highlighted:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp zero_dp_2`}}),Pi=new Y({}),Mi=new Y({}),ha=new gp({props:{warning:!1,$$slots:{default:[hM]},$$scope:{ctx:Z}}}),Vi=new O({props:{code:`export TASK_NAME=mrpc python examples/pytorch/text-classification/run_glue.py \\ --model_name_or_path bert-base-cased \\ --task_name $TASK_NAME \\ --do_train \\ --do_eval \\ --max_seq_length 128 \\ --per_device_train_batch_size 32 \\ --learning_rate 2e-5 \\ --num_train_epochs 3 \\ --output_dir /tmp/$TASK_NAME/ \\ --use_mps_device \\ --overwrite_output_dir`,highlighted:`<span class="hljs-built_in">export</span> TASK_NAME=mrpc python examples/pytorch/text-classification/run_glue.py \\ --model_name_or_path bert-base-cased \\ --task_name <span class="hljs-variable">$TASK_NAME</span> \\ --do_train \\ --do_eval \\ --max_seq_length 128 \\ --per_device_train_batch_size 32 \\ --learning_rate 2e-5 \\ --num_train_epochs 3 \\ --output_dir /tmp/<span class="hljs-variable">$TASK_NAME</span>/ \\ --use_mps_device \\ --overwrite_output_dir`}}),{c(){T=n("meta"),D=l(),$=n("h1"),k=n("a"),L=n("span"),h(A.$$.fragment),S=l(),W=n("span"),fe=r("Trainer"),oe=l(),G=n("p"),se=r("The "),ie=n("a"),re=r("Trainer"),le=r(" class provides an API for feature-complete training in PyTorch for most standard use cases. 
It\u2019s used in most of the "),H=n("a"),Ze=r("example scripts"),ge=r("."),z=l(),I=n("p"),st=r("Before instantiating your "),ae=n("a"),it=r("Trainer"),lt=r(", create a "),_e=n("a"),Ia=r("TrainingArguments"),Ua=r(" to access all the points of customization during training."),Ke=l(),Pe=n("p"),Na=r("The API supports distributed training on multiple GPUs/TPUs, mixed precision through "),ve=n("a"),za=r("NVIDIA Apex"),Fa=r(" and Native AMP for PyTorch."),K=l(),B=n("p"),el=r("The "),be=n("a"),Xo=r("Trainer"),tl=r(" contains the basic training loop which supports the above features. To inject custom behavior you can subclass them and override the following methods:"),ro=l(),C=n("ul"),V=n("li"),Qo=n("strong"),ol=r("get_train_dataloader"),rl=r(" \u2014 Creates the training DataLoader."),al=l(),nl=n("li"),_p=n("strong"),Aw=r("get_eval_dataloader"),Pw=r(" \u2014 Creates the evaluation DataLoader."),Dw=l(),sl=n("li"),vp=n("strong"),Sw=r("get_test_dataloader"),qw=r(" \u2014 Creates the test DataLoader."),Ow=l(),il=n("li"),bp=n("strong"),Cw=r("log"),Iw=r(" \u2014 Logs information on the various objects watching training."),Uw=l(),dt=n("li"),yp=n("strong"),Nw=r("create_optimizer_and_scheduler"),zw=r(` \u2014 Sets up the optimizer and learning rate scheduler if they were not passed at init. Note, that you can also subclass or override the `),wp=n("code"),Fw=r("create_optimizer"),Lw=r(" and "),Tp=n("code"),Rw=r("create_scheduler"),Ww=r(` methods separately.`),Gw=l(),ll=n("li"),Ep=n("strong"),Mw=r("create_optimizer"),jw=r(" \u2014 Sets up the optimizer if it wasn\u2019t passed at init."),Hw=l(),dl=n("li"),$p=n("strong"),Bw=r("create_scheduler"),Vw=r(" \u2014 Sets up the learning rate scheduler if it wasn\u2019t passed at init."),Yw=l(),cl=n("li"),xp=n("strong"),Zw=r("compute_loss"),Kw=r(" - Computes the loss on a batch of training inputs."),Jw=l(),pl=n("li"),kp=n("strong"),Xw=r("training_step"),Qw=r(" \u2014 Performs a training step."),e0=l(),ml=n("li"),Ap=n("strong"),t0=r("prediction_step"),o0=r(" \u2014 Performs an evaluation/test step."),r0=l(),hl=n("li"),Pp=n("strong"),a0=r("evaluate"),n0=r(" \u2014 Runs an evaluation loop and returns metrics."),s0=l(),ul=n("li"),Dp=n("strong"),i0=r("predict"),l0=r(" \u2014 Returns predictions (with metrics if labels are available) on a test set."),yv=l(),h(er.$$.fragment),wv=l(),tr=n("p"),d0=r("Here is an example of how to customize "),fl=n("a"),c0=r("Trainer"),p0=r(" to use a weighted loss (useful when you have an unbalanced training set):"),Tv=l(),h(La.$$.fragment),Ev=l(),ct=n("p"),m0=r("Another way to customize the training loop behavior for the PyTorch "),gl=n("a"),h0=r("Trainer"),u0=r(" is to use "),_l=n("a"),f0=r("callbacks"),g0=r(" that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping)."),$v=l(),ao=n("h2"),or=n("a"),Sp=n("span"),h(Ra.$$.fragment),_0=l(),qp=n("span"),v0=r("Trainer"),xv=l(),b=n("div"),h(Wa.$$.fragment),b0=l(),Op=n("p"),y0=r("Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for \u{1F917} Transformers."),w0=l(),Cp=n("p"),T0=r("Important attributes:"),E0=l(),ye=n("ul"),rr=n("li"),Ip=n("strong"),$0=r("model"),x0=r(" \u2014 Always points to the core model. 
If using a transformers model, it will be a "),vl=n("a"),k0=r("PreTrainedModel"),A0=r(` subclass.`),P0=l(),J=n("li"),Up=n("strong"),D0=r("model_wrapped"),S0=r(` \u2014 Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `),Np=n("code"),q0=r("DeepSpeed"),O0=r(`, the inner model is wrapped in `),zp=n("code"),C0=r("DeepSpeed"),I0=r(" and then again in "),Fp=n("code"),U0=r("torch.nn.DistributedDataParallel"),N0=r(`. If the inner model hasn\u2019t been wrapped, then `),Lp=n("code"),z0=r("self.model_wrapped"),F0=r(" is the same as "),Rp=n("code"),L0=r("self.model"),R0=r("."),W0=l(),bl=n("li"),Wp=n("strong"),G0=r("is_model_parallel"),M0=r(` \u2014 Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).`),j0=l(),De=n("li"),Gp=n("strong"),H0=r("place_model_on_device"),B0=r(` \u2014 Whether or not to automatically place the model on the device - it will be set to `),Mp=n("code"),V0=r("False"),Y0=r(` if model parallel or deepspeed is used, or if the default `),jp=n("code"),Z0=r("TrainingArguments.place_model_on_device"),K0=r(" is overridden to return "),Hp=n("code"),J0=r("False"),X0=r(" ."),Q0=l(),Se=n("li"),Bp=n("strong"),eT=r("is_in_train"),tT=r(" \u2014 Whether or not a model is currently running "),Vp=n("code"),oT=r("train"),rT=r(" (e.g. when "),Yp=n("code"),aT=r("evaluate"),nT=r(` is called while in `),Zp=n("code"),sT=r("train"),iT=r(")"),lT=l(),ar=n("div"),h(Ga.$$.fragment),dT=l(),Ma=n("p"),cT=r("Add a callback to the current list of "),Kp=n("code"),pT=r("~transformer.TrainerCallback"),mT=r("."),hT=l(),nr=n("div"),h(ja.$$.fragment),uT=l(),Ha=n("p"),fT=r("A helper wrapper that creates an appropriate context manager for "),Jp=n("code"),gT=r("autocast"),_T=r(` while feeding it the desired arguments, depending on the situation.`),vT=l(),pt=n("div"),h(Ba.$$.fragment),bT=l(),Xp=n("p"),yT=r("How the loss is computed by Trainer. By default, all models return the loss in the first element."),wT=l(),Qp=n("p"),TT=r("Subclass and override for custom behavior."),ET=l(),sr=n("div"),h(Va.$$.fragment),$T=l(),em=n("p"),xT=r("A helper wrapper to group together context managers."),kT=l(),ir=n("div"),h(Ya.$$.fragment),AT=l(),Za=n("p"),PT=r("Creates a draft of a model card using the information available to the "),tm=n("code"),DT=r("Trainer"),ST=r("."),qT=l(),mt=n("div"),h(Ka.$$.fragment),OT=l(),om=n("p"),CT=r("Setup the optimizer."),IT=l(),Ja=n("p"),UT=r(`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),rm=n("code"),NT=r("optimizers"),zT=r(", or subclass and override this method in a subclass."),FT=l(),ht=n("div"),h(Xa.$$.fragment),LT=l(),am=n("p"),RT=r("Setup the optimizer and the learning rate scheduler."),WT=l(),Je=n("p"),GT=r(`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),nm=n("code"),MT=r("optimizers"),jT=r(", or subclass and override this method (or "),sm=n("code"),HT=r("create_optimizer"),BT=r(` and/or `),im=n("code"),VT=r("create_scheduler"),YT=r(") in a subclass."),ZT=l(),lr=n("div"),h(Qa.$$.fragment),KT=l(),lm=n("p"),JT=r(`Setup the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or passed as an argument.`),XT=l(),qe=n("div"),h(en.$$.fragment),QT=l(),dm=n("p"),e4=r("Run evaluation and returns metrics."),t4=l(),tn=n("p"),o4=r(`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),cm=n("code"),r4=r("compute_metrics"),a4=r(" argument)."),n4=l(),pm=n("p"),s4=r("You can also subclass and override this method to inject custom behavior."),i4=l(),ut=n("div"),h(on.$$.fragment),l4=l(),no=n("p"),d4=r("Prediction/evaluation loop, shared by "),mm=n("code"),c4=r("Trainer.evaluate()"),p4=r(" and "),hm=n("code"),m4=r("Trainer.predict()"),h4=r("."),u4=l(),um=n("p"),f4=r("Works both with or without labels."),g4=l(),dr=n("div"),h(rn.$$.fragment),_4=l(),an=n("p"),v4=r("For models that inherit from "),yl=n("a"),b4=r("PreTrainedModel"),y4=r(`, uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method.`),w4=l(),ft=n("div"),h(nn.$$.fragment),T4=l(),sn=n("p"),E4=r("Returns the evaluation "),fm=n("code"),$4=r("~torch.utils.data.DataLoader"),x4=r("."),k4=l(),gm=n("p"),A4=r("Subclass and override this method if you want to inject some custom behavior."),P4=l(),cr=n("div"),h(ln.$$.fragment),D4=l(),_m=n("p"),S4=r("Returns the optimizer class and optimizer parameters based on the training arguments."),q4=l(),gt=n("div"),h(dn.$$.fragment),O4=l(),cn=n("p"),C4=r("Returns the test "),vm=n("code"),I4=r("~torch.utils.data.DataLoader"),U4=r("."),N4=l(),bm=n("p"),z4=r("Subclass and override this method if you want to inject some custom behavior."),F4=l(),Oe=n("div"),h(pn.$$.fragment),L4=l(),mn=n("p"),R4=r("Returns the training "),ym=n("code"),W4=r("~torch.utils.data.DataLoader"),G4=r("."),M4=l(),so=n("p"),j4=r("Will use no sampler if "),wm=n("code"),H4=r("train_dataset"),B4=r(" does not implement "),Tm=n("code"),V4=r("__len__"),Y4=r(`, a random sampler (adapted to distributed training if necessary) otherwise.`),Z4=l(),Em=n("p"),K4=r("Subclass and override this method if you want to inject some custom behavior."),J4=l(),_t=n("div"),h(hn.$$.fragment),X4=l(),we=n("p"),Q4=r("Launch an hyperparameter search using "),$m=n("code"),eE=r("optuna"),tE=r(" or "),xm=n("code"),oE=r("Ray Tune"),rE=r(" or "),km=n("code"),aE=r("SigOpt"),nE=r(`. 
The optimized quantity is determined by `),Am=n("code"),sE=r("compute_objective"),iE=r(`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise.`),lE=l(),h(pr.$$.fragment),dE=l(),mr=n("div"),h(un.$$.fragment),cE=l(),fn=n("p"),pE=r("Initializes a git repo in "),Pm=n("code"),mE=r("self.args.hub_model_id"),hE=r("."),uE=l(),hr=n("div"),h(gn.$$.fragment),fE=l(),Dm=n("p"),gE=r(`Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`),_E=l(),ur=n("div"),h(_n.$$.fragment),vE=l(),vn=n("p"),bE=r(`Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `),Sm=n("code"),yE=r("True"),wE=r(" for one process)."),TE=l(),vt=n("div"),h(bn.$$.fragment),EE=l(),yn=n("p"),$E=r("Log "),qm=n("code"),xE=r("logs"),kE=r(" on the various objects watching training."),AE=l(),Om=n("p"),PE=r("Subclass and override this method to inject custom behavior."),DE=l(),P=n("div"),h(wn.$$.fragment),SE=l(),Cm=n("p"),qE=r("Log metrics in a specially formatted way"),OE=l(),Im=n("p"),CE=r("Under distributed environment this is done only for a process with rank 0."),IE=l(),Um=n("p"),UE=r("Notes on memory reports:"),NE=l(),io=n("p"),zE=r("In order to get memory usage report you need to install "),Nm=n("code"),FE=r("psutil"),LE=r(". You can do that with "),zm=n("code"),RE=r("pip install psutil"),WE=r("."),GE=l(),h(fr.$$.fragment),ME=l(),Fm=n("p"),Lm=n("strong"),jE=r("Understanding the reports:"),HE=l(),Xe=n("ul"),Te=n("li"),BE=r("the first segment, e.g., "),Rm=n("code"),VE=r("train__"),YE=r(", tells you which stage the metrics are for. Reports starting with "),Wm=n("code"),ZE=r("init_"),KE=r(` will be added to the first stage that gets run. So that if only evaluation is run, the memory usage for the `),Gm=n("code"),JE=r("__init__"),XE=r(" will be reported along with the "),Mm=n("code"),QE=r("eval_"),e9=r(" metrics."),t9=l(),lo=n("li"),o9=r("the third segment, is either "),jm=n("code"),r9=r("cpu"),a9=r(" or "),Hm=n("code"),n9=r("gpu"),s9=r(`, tells you whether it\u2019s the general RAM or the gpu0 memory metric.`),i9=l(),wl=n("li"),Bm=n("code"),l9=r("*_alloc_delta"),d9=r(` - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated.`),c9=l(),bt=n("li"),Vm=n("code"),p9=r("*_peaked_delta"),m9=r(` - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up `),Ym=n("code"),h9=r("alloc_delta"),u9=r(` + `),Zm=n("code"),f9=r("peaked_delta"),g9=r(" and you know how much memory was needed to complete that stage."),_9=l(),Km=n("p"),v9=r(`The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. 
Perhaps in the future these reports will evolve to measure those too.`),b9=l(),Jm=n("p"),y9=r(`The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise.`),w9=l(),Tn=n("p"),T9=r(`The CPU peak memory is measured using a sampling thread. Due to python\u2019s GIL it may miss some of the peak memory if that thread didn\u2019t get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using `),Xm=n("code"),E9=r("tracemalloc"),$9=r(` would have reported the exact peak memory, but it doesn\u2019t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won\u2019t be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage.`),x9=l(),Qe=n("p"),k9=r("The GPU allocated and peak memory reporting is done with "),Qm=n("code"),A9=r("torch.cuda.memory_allocated()"),P9=r(` and `),eh=n("code"),D9=r("torch.cuda.max_memory_allocated()"),S9=r(`. This metric reports only \u201Cdeltas\u201D for pytorch-specific allocations, as `),th=n("code"),q9=r("torch.cuda"),O9=r(` memory management system doesn\u2019t track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.`),C9=l(),ne=n("p"),I9=r("Note that this tracker doesn\u2019t account for memory allocations outside of "),Tl=n("a"),U9=r("Trainer"),N9=r("\u2019s "),oh=n("code"),z9=r("__init__"),F9=r(", "),rh=n("code"),L9=r("train"),R9=r(`, `),ah=n("code"),W9=r("evaluate"),G9=r(" and "),nh=n("code"),M9=r("predict"),j9=r(" calls."),H9=l(),U=n("p"),B9=r("Because "),sh=n("code"),V9=r("evaluation"),Y9=r(" calls may happen during "),ih=n("code"),Z9=r("train"),K9=r(`, we can\u2019t handle nested invocations because `),lh=n("code"),J9=r("torch.cuda.max_memory_allocated"),X9=r(" is a single counter, so if it gets reset by a nested eval call, "),dh=n("code"),Q9=r("train"),e$=r(`\u2019s tracker will report incorrect info. If this `),En=n("a"),t$=r("pytorch issue"),o$=r(` gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of `),ch=n("code"),r$=r("train"),a$=r(", "),ph=n("code"),n$=r("evaluate"),s$=r(" and "),mh=n("code"),i$=r("predict"),l$=r(" methods. Which means that if "),hh=n("code"),d$=r("eval"),c$=r(" is called during "),uh=n("code"),p$=r("train"),m$=r(`, it\u2019s the latter that will account for its memory usage and that of the former.`),h$=l(),Ee=n("p"),u$=r("This also means that if any other tool that is used along the "),El=n("a"),f$=r("Trainer"),g$=r(` calls `),fh=n("code"),_$=r("torch.cuda.reset_peak_memory_stats"),v$=r(", the gpu peak memory stats could be invalid. 
And the "),$l=n("a"),b$=r("Trainer"),y$=r(` will disrupt the normal behavior of any such tools that rely on calling `),gh=n("code"),w$=r("torch.cuda.reset_peak_memory_stats"),T$=r(" themselves."),E$=l(),_h=n("p"),$$=r("For best performance you may want to consider turning the memory profiling off for production runs."),x$=l(),gr=n("div"),h($n.$$.fragment),k$=l(),vh=n("p"),A$=r("Reformat Trainer metrics values to a human-readable format"),P$=l(),_r=n("div"),h(xn.$$.fragment),D$=l(),kn=n("p"),S$=r("Helper to get number of samples in a "),bh=n("code"),q$=r("~torch.utils.data.DataLoader"),O$=r(` by accessing its dataset. When dataloader.dataset does not exist or has no length, estimates as best it can`),C$=l(),yt=n("div"),h(An.$$.fragment),I$=l(),Pn=n("p"),U$=r("Remove a callback from the current list of "),yh=n("code"),N$=r("~transformer.TrainerCallback"),z$=r(" and returns it."),F$=l(),Dn=n("p"),L$=r("If the callback is not found, returns "),wh=n("code"),R$=r("None"),W$=r(" (and no error is raised)."),G$=l(),X=n("div"),h(Sn.$$.fragment),M$=l(),Th=n("p"),j$=r("Run prediction and returns predictions and potential metrics."),H$=l(),qn=n("p"),B$=r(`Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `),Eh=n("code"),V$=r("evaluate()"),Y$=r("."),Z$=l(),h(vr.$$.fragment),K$=l(),On=n("p"),J$=r("Returns: "),$h=n("em"),X$=r("NamedTuple"),Q$=r(" A namedtuple with the following keys:"),e3=l(),co=n("ul"),po=n("li"),t3=r("predictions ("),xh=n("code"),o3=r("np.ndarray"),r3=r("): The predictions on "),kh=n("code"),a3=r("test_dataset"),n3=r("."),s3=l(),mo=n("li"),i3=r("label_ids ("),Ah=n("code"),l3=r("np.ndarray"),d3=r(", "),Ph=n("em"),c3=r("optional"),p3=r("): The labels (if the dataset contained some)."),m3=l(),ho=n("li"),h3=r("metrics ("),Dh=n("code"),u3=r("Dict[str, float]"),f3=r(", "),Sh=n("em"),g3=r("optional"),_3=r(`): The potential dictionary of metrics (if the dataset contained labels).`),v3=l(),wt=n("div"),h(Cn.$$.fragment),b3=l(),uo=n("p"),y3=r("Prediction/evaluation loop, shared by "),qh=n("code"),w3=r("Trainer.evaluate()"),T3=r(" and "),Oh=n("code"),E3=r("Trainer.predict()"),$3=r("."),x3=l(),Ch=n("p"),k3=r("Works both with or without labels."),A3=l(),Tt=n("div"),h(In.$$.fragment),P3=l(),fo=n("p"),D3=r("Perform an evaluation step on "),Ih=n("code"),S3=r("model"),q3=r(" using "),Uh=n("code"),O3=r("inputs"),C3=r("."),I3=l(),Nh=n("p"),U3=r("Subclass and override to inject custom behavior."),N3=l(),br=n("div"),h(Un.$$.fragment),z3=l(),et=n("p"),F3=r("Upload "),zh=n("em"),L3=r("self.model"),R3=r(" and "),Fh=n("em"),W3=r("self.tokenizer"),G3=r(" to the \u{1F917} model hub on the repo "),Lh=n("em"),M3=r("self.args.hub_model_id"),j3=r("."),H3=l(),yr=n("div"),h(Nn.$$.fragment),B3=l(),zn=n("p"),V3=r("Remove a callback from the current list of "),Rh=n("code"),Y3=r("~transformer.TrainerCallback"),Z3=r("."),K3=l(),Ce=n("div"),h(Fn.$$.fragment),J3=l(),Ln=n("p"),X3=r("Save metrics into a json file for that split, e.g. "),Wh=n("code"),Q3=r("train_results.json"),ex=r("."),tx=l(),Gh=n("p"),ox=r("Under distributed environment this is done only for a process with rank 0."),rx=l(),Rn=n("p"),ax=r("To understand the metrics please read the docstring of "),xl=n("a"),nx=r("log_metrics()"),sx=r(`. 
The only difference is that raw unformatted numbers are saved in the current method.`),ix=l(),Et=n("div"),h(Wn.$$.fragment),lx=l(),Gn=n("p"),dx=r("Will save the model, so you can reload it using "),Mh=n("code"),cx=r("from_pretrained()"),px=r("."),mx=l(),jh=n("p"),hx=r("Will only save from the main process."),ux=l(),$t=n("div"),h(Mn.$$.fragment),fx=l(),Hh=n("p"),gx=r("Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model"),_x=l(),Bh=n("p"),vx=r("Under distributed environment this is done only for a process with rank 0."),bx=l(),wr=n("div"),h(jn.$$.fragment),yx=l(),Hn=n("p"),wx=r("A helper wrapper that creates an appropriate context manager for "),Vh=n("code"),Tx=r("torchdynamo"),Ex=r("."),$x=l(),Tr=n("div"),h(Bn.$$.fragment),xx=l(),Yh=n("p"),kx=r("Main training entry point."),Ax=l(),xt=n("div"),h(Vn.$$.fragment),Px=l(),Zh=n("p"),Dx=r("Perform a training step on a batch of inputs."),Sx=l(),Kh=n("p"),qx=r("Subclass and override to inject custom behavior."),kv=l(),go=n("h2"),Er=n("a"),Jh=n("span"),h(Yn.$$.fragment),Ox=l(),Xh=n("span"),Cx=r("Seq2SeqTrainer"),Av=l(),tt=n("div"),h(Zn.$$.fragment),Ix=l(),Ie=n("div"),h(Kn.$$.fragment),Ux=l(),Qh=n("p"),Nx=r("Run evaluation and returns metrics."),zx=l(),Jn=n("p"),Fx=r(`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),eu=n("code"),Lx=r("compute_metrics"),Rx=r(" argument)."),Wx=l(),tu=n("p"),Gx=r("You can also subclass and override this method to inject custom behavior."),Mx=l(),Q=n("div"),h(Xn.$$.fragment),jx=l(),ou=n("p"),Hx=r("Run prediction and returns predictions and potential metrics."),Bx=l(),Qn=n("p"),Vx=r(`Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in `),ru=n("code"),Yx=r("evaluate()"),Zx=r("."),Kx=l(),h($r.$$.fragment),Jx=l(),es=n("p"),Xx=r("Returns: "),au=n("em"),Qx=r("NamedTuple"),ek=r(" A namedtuple with the following keys:"),tk=l(),_o=n("ul"),vo=n("li"),ok=r("predictions ("),nu=n("code"),rk=r("np.ndarray"),ak=r("): The predictions on "),su=n("code"),nk=r("test_dataset"),sk=r("."),ik=l(),bo=n("li"),lk=r("label_ids ("),iu=n("code"),dk=r("np.ndarray"),ck=r(", "),lu=n("em"),pk=r("optional"),mk=r("): The labels (if the dataset contained some)."),hk=l(),yo=n("li"),uk=r("metrics ("),du=n("code"),fk=r("Dict[str, float]"),gk=r(", "),cu=n("em"),_k=r("optional"),vk=r(`): The potential dictionary of metrics (if the dataset contained labels).`),Pv=l(),wo=n("h2"),xr=n("a"),pu=n("span"),h(ts.$$.fragment),bk=l(),mu=n("span"),yk=r("TrainingArguments"),Dv=l(),F=n("div"),h(os.$$.fragment),wk=l(),rs=n("p"),Tk=r("TrainingArguments is the subset of the arguments we use in our example scripts "),hu=n("strong"),Ek=r(`which relate to the training loop itself`),$k=r("."),xk=l(),To=n("p"),kk=r("Using "),kl=n("a"),Ak=r("HfArgumentParser"),Pk=r(` we can turn this class into `),as=n("a"),Dk=r("argparse"),Sk=r(` arguments that can be specified on the command line.`),qk=l(),de=n("div"),h(ns.$$.fragment),Ok=l(),uu=n("p"),Ck=r(`Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.`),Ik=l(),Eo=n("p"),Uk=r("For the main process the log level defaults to "),fu=n("code"),Nk=r("logging.INFO"),zk=r(" unless overridden by "),gu=n("code"),Fk=r("log_level"),Lk=r(" argument."),Rk=l(),$o=n("p"),Wk=r("For the replica processes the log level defaults to "),_u=n("code"),Gk=r("logging.WARNING"),Mk=r(" unless overridden by "),vu=n("code"),jk=r("log_level_replica"),Hk=r(` argument.`),Bk=l(),ss=n("p"),Vk=r("The choice between the main and replica process settings is made according to the return value of "),bu=n("code"),Yk=r("should_log"),Zk=r("."),Kk=l(),kr=n("div"),h(is.$$.fragment),Jk=l(),yu=n("p"),Xk=r("Get number of steps used for a linear warmup."),Qk=l(),kt=n("div"),h(ls.$$.fragment),e5=l(),wu=n("p"),t5=r(`A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it\u2019s finished releasing the replicas.`),o5=l(),xo=n("p"),r5=r("One such use is for "),Tu=n("code"),a5=r("datasets"),n5=r("\u2019s "),Eu=n("code"),s5=r("map"),i5=r(` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.`),l5=l(),Ar=n("div"),h(ds.$$.fragment),d5=l(),cs=n("p"),c5=r("Serializes this instance while replace "),$u=n("code"),p5=r("Enum"),m5=r(` by their values (for JSON serialization support). 
It obfuscates the token values by removing their value.`),h5=l(),Pr=n("div"),h(ps.$$.fragment),u5=l(),xu=n("p"),f5=r("Serializes this instance to a JSON string."),g5=l(),Dr=n("div"),h(ms.$$.fragment),_5=l(),ku=n("p"),v5=r("Sanitized serialization to use with TensorBoard\u2019s hparams"),Sv=l(),ko=n("h2"),Sr=n("a"),Au=n("span"),h(hs.$$.fragment),b5=l(),Pu=n("span"),y5=r("Seq2SeqTrainingArguments"),qv=l(),ot=n("div"),h(us.$$.fragment),w5=l(),fs=n("p"),T5=r("TrainingArguments is the subset of the arguments we use in our example scripts "),Du=n("strong"),E5=r(`which relate to the training loop itself`),$5=r("."),x5=l(),Ao=n("p"),k5=r("Using "),Al=n("a"),A5=r("HfArgumentParser"),P5=r(` we can turn this class into `),gs=n("a"),D5=r("argparse"),S5=r(` arguments that can be specified on the command line.`),Ov=l(),Po=n("h2"),qr=n("a"),Su=n("span"),h(_s.$$.fragment),q5=l(),qu=n("span"),O5=r("Checkpoints"),Cv=l(),ce=n("p"),C5=r("By default, "),Pl=n("a"),I5=r("Trainer"),U5=r(" will save all checkpoints in the "),Ou=n("code"),N5=r("output_dir"),z5=r(` you set in the `),Dl=n("a"),F5=r("TrainingArguments"),L5=r(" you are using. Those will go in subfolder named "),Cu=n("code"),R5=r("checkpoint-xxx"),W5=r(` with xxx being the step at which the training was at.`),Iv=l(),Or=n("p"),G5=r("Resuming training from a checkpoint can be done when calling "),Sl=n("a"),M5=r("Trainer.train()"),j5=r(" with either:"),Uv=l(),Cr=n("ul"),ql=n("li"),Iu=n("code"),H5=r("resume_from_checkpoint=True"),B5=r(" which will resume training from the latest checkpoint"),V5=l(),Ol=n("li"),Uu=n("code"),Y5=r("resume_from_checkpoint=checkpoint_dir"),Z5=r(` which will resume training from the specific checkpoint in the directory passed.`),Nv=l(),Ue=n("p"),K5=r("In addition, you can easily save your checkpoints on the Model Hub when using "),Nu=n("code"),J5=r("push_to_hub=True"),X5=r(`. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. 
You can adapt the `),zu=n("code"),Q5=r("hub-strategy"),e6=r(" value of your "),Cl=n("a"),t6=r("TrainingArguments"),o6=r(" to either:"),zv=l(),Ir=n("ul"),Ur=n("li"),Fu=n("code"),r6=r('"checkpoint"'),a6=r(`: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `),Lu=n("code"),n6=r('trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")'),s6=r("."),i6=l(),Il=n("li"),Ru=n("code"),l6=r('"all_checkpoints"'),d6=r(`: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)`),Fv=l(),Do=n("h2"),Nr=n("a"),Wu=n("span"),h(vs.$$.fragment),c6=l(),Gu=n("span"),p6=r("Logging"),Lv=l(),Ne=n("p"),m6=r("By default "),Ul=n("a"),h6=r("Trainer"),u6=r(" will use "),Mu=n("code"),f6=r("logging.INFO"),g6=r(" for the main process and "),ju=n("code"),_6=r("logging.WARNING"),v6=r(" for the replicas if any."),Rv=l(),At=n("p"),b6=r("These defaults can be overridden to use any of the 5 "),Hu=n("code"),y6=r("logging"),w6=r(" levels with "),Nl=n("a"),T6=r("TrainingArguments"),E6=r(`\u2019s arguments:`),Wv=l(),zr=n("ul"),zl=n("li"),Bu=n("code"),$6=r("log_level"),x6=r(" - for the main process"),k6=l(),Fl=n("li"),Vu=n("code"),A6=r("log_level_replica"),P6=r(" - for the replicas"),Gv=l(),ze=n("p"),D6=r("Further, if "),Ll=n("a"),S6=r("TrainingArguments"),q6=r("\u2019s "),Yu=n("code"),O6=r("log_on_each_node"),C6=r(" is set to "),Zu=n("code"),I6=r("False"),U6=r(` only the main node will use the log level settings for its main process, all other nodes will use the log level settings for replicas.`),Mv=l(),ee=n("p"),N6=r("Note that "),Rl=n("a"),z6=r("Trainer"),F6=r(" is going to set "),Ku=n("code"),L6=r("transformers"),R6=r(`\u2019s log level separately for each node in its `),Ju=n("code"),W6=r("Trainer.__init__()"),G6=r(`. 
So you may want to set this sooner (see the next example) if you tap into other `),Xu=n("code"),M6=r("transformers"),j6=r(" functionality before creating the "),Wl=n("a"),H6=r("Trainer"),B6=r(" object."),jv=l(),Gl=n("p"),V6=r("Here is an example of how this can be used in an application:"),Hv=l(),h(bs.$$.fragment),Bv=l(),Ml=n("p"),Y6=r(`And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:`),Vv=l(),h(ys.$$.fragment),Yv=l(),jl=n("p"),Z6=r(`In the multi-node environment if you also don\u2019t want the logs to repeat for each node\u2019s main process, you will want to change the above to:`),Zv=l(),h(ws.$$.fragment),Kv=l(),Hl=n("p"),K6=r(`and then only the main process of the first node will log at the \u201Cwarning\u201D level, and all other processes on the main node and all processes on other nodes will log at the \u201Cerror\u201D level.`),Jv=l(),Bl=n("p"),J6=r("If you need your application to be as quiet as possible you could do:"),Xv=l(),h(Ts.$$.fragment),Qv=l(),Fr=n("p"),X6=r("(add "),Qu=n("code"),Q6=r("--log_on_each_node 0"),eA=r(" if on multi-node environment)"),e1=l(),So=n("h2"),Lr=n("a"),ef=n("span"),h(Es.$$.fragment),tA=l(),tf=n("span"),oA=r("Randomness"),t1=l(),pe=n("p"),rA=r("When resuming from a checkpoint generated by "),Vl=n("a"),aA=r("Trainer"),nA=r(` all efforts are made to restore the `),of=n("em"),sA=r("python"),iA=r(", "),rf=n("em"),lA=r("numpy"),dA=r(" and "),af=n("em"),cA=r("pytorch"),pA=r(` RNG states to the same states as they were at the moment of saving that checkpoint, which should make the \u201Cstop and resume\u201D style of training as close as possible to non-stop training.`),o1=l(),Pt=n("p"),mA=r(`However, due to various default non-deterministic pytorch settings this might not fully work. If you want full determinism please refer to `),$s=n("a"),hA=r("Controlling sources of randomness"),uA=r(`. As explained in the document, that some of those settings that make things deterministic (.e.g., `),nf=n("code"),fA=r("torch.backends.cudnn.deterministic"),gA=r(`) may slow things down, therefore this can\u2019t be done by default, but you can enable those yourself if needed.`),r1=l(),qo=n("h2"),Rr=n("a"),sf=n("span"),h(xs.$$.fragment),_A=l(),lf=n("span"),vA=r("Specific GPUs Selection"),a1=l(),Yl=n("p"),bA=r("Let\u2019s discuss how you can tell your program which GPUs are to be used and in what order."),n1=l(),Wr=n("p"),yA=r("When using "),ks=n("a"),df=n("code"),wA=r("DistributedDataParallel"),TA=r(" to use only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs, but you wish to use the first 2 you can do:"),s1=l(),h(As.$$.fragment),i1=l(),Dt=n("p"),EA=r("if you have either "),Ps=n("a"),cf=n("code"),$A=r("accelerate"),xA=r(" or "),Ds=n("a"),pf=n("code"),kA=r("deepspeed"),AA=r(" installed you can also accomplish the same by using one of:"),l1=l(),h(Ss.$$.fragment),d1=l(),h(qs.$$.fragment),c1=l(),Gr=n("p"),PA=r("You don\u2019t need to use the Accelerate or "),Zl=n("a"),DA=r("the Deepspeed integration"),SA=r(" features to use these launchers."),p1=l(),Kl=n("p"),qA=r("Until now you were able to tell the program how many GPUs to use. 
Now let\u2019s discuss how to select specific GPUs and control their order."),m1=l(),Jl=n("p"),OA=r("The following environment variables help you control which GPUs to use and their order."),h1=l(),Xl=n("p"),mf=n("strong"),hf=n("code"),CA=r("CUDA_VISIBLE_DEVICES"),u1=l(),Mr=n("p"),IA=r("If you have multiple GPUs and you\u2019d like to use only 1 or a few of those GPUs, set the environment variable "),uf=n("code"),UA=r("CUDA_VISIBLE_DEVICES"),NA=r(" to a list of the GPUs to be used."),f1=l(),Ql=n("p"),zA=r("For example, let\u2019s say you have 4 GPUs: 0, 1, 2 and 3. To run only on the physical GPUs 0 and 2, you can do:"),g1=l(),h(Os.$$.fragment),_1=l(),St=n("p"),FA=r("So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to "),ff=n("code"),LA=r("cuda:0"),RA=r(" and "),gf=n("code"),WA=r("cuda:1"),GA=r(" correspondingly."),v1=l(),ed=n("p"),MA=r("You can even change their order:"),b1=l(),h(Cs.$$.fragment),y1=l(),qt=n("p"),jA=r("Here your physical GPUs 0 and 2 are mapped to "),_f=n("code"),HA=r("cuda:1"),BA=r(" and "),vf=n("code"),VA=r("cuda:0"),YA=r(" correspondingly."),w1=l(),Ot=n("p"),ZA=r("The above examples were all for "),bf=n("code"),KA=r("DistributedDataParallel"),JA=r(" use pattern, but the same method works for "),Is=n("a"),yf=n("code"),XA=r("DataParallel"),QA=r(" as well:"),T1=l(),h(Us.$$.fragment),E1=l(),td=n("p"),e8=r("To emulate an environment without GPUs simply set this environment variable to an empty value like so:"),$1=l(),h(Ns.$$.fragment),x1=l(),od=n("p"),t8=r("As with any environment variable you can, of course, export those instead of adding these to the command line, as in:"),k1=l(),h(zs.$$.fragment),A1=l(),rd=n("p"),o8=r("but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it\u2019s a common practice to set the environment variable just for a specific run on the same command line as it\u2019s shown in most examples of this section."),P1=l(),ad=n("p"),wf=n("strong"),Tf=n("code"),r8=r("CUDA_DEVICE_ORDER"),D1=l(),jr=n("p"),a8=r("There is an additional environment variable "),Ef=n("code"),n8=r("CUDA_DEVICE_ORDER"),s8=r(" that controls how the physical devices are ordered. The two choices are:"),S1=l(),nd=n("ol"),Fs=n("li"),i8=r("ordered by PCIe bus IDs (matches "),$f=n("code"),l8=r("nvidia-smi"),d8=r("\u2019s order) - this is the default."),q1=l(),h(Ls.$$.fragment),O1=l(),Rs=n("ol"),xf=n("li"),c8=r("ordered by GPU compute capabilities"),C1=l(),h(Ws.$$.fragment),I1=l(),Ct=n("p"),p8=r("Most of the time you don\u2019t need to care about this environment variable, but it\u2019s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can\u2019t swap the cards (e.g., if the cooling of the devices gets impacted) then setting "),kf=n("code"),m8=r("CUDA_DEVICE_ORDER=FASTEST_FIRST"),h8=r(" will always put the newer faster card first. 
It\u2019ll be somewhat confusing though since "),Af=n("code"),u8=r("nvidia-smi"),f8=r(" will still report them in the PCIe order."),U1=l(),sd=n("p"),g8=r("The other solution to swapping the order is to use:"),N1=l(),h(Gs.$$.fragment),z1=l(),id=n("p"),_8=r("In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has."),F1=l(),Hr=n("p"),v8=r("Also if you do set this environment variable it\u2019s the best to set it in your "),Pf=n("code"),b8=r("~/.bashrc"),y8=r(" file or some other startup config file and forget about it."),L1=l(),Oo=n("h2"),Br=n("a"),Df=n("span"),h(Ms.$$.fragment),w8=l(),Sf=n("span"),T8=r("Trainer Integrations"),R1=l(),Vr=n("p"),E8=r("The "),ld=n("a"),$8=r("Trainer"),x8=r(` has been extended to support libraries that may dramatically improve your training time and fit much bigger models.`),W1=l(),me=n("p"),k8=r("Currently it supports third party solutions, "),js=n("a"),A8=r("DeepSpeed"),P8=r(", "),Hs=n("a"),D8=r("PyTorch FSDP"),S8=r(" and "),Bs=n("a"),q8=r("FairScale"),O8=r(", which implement parts of the paper "),Vs=n("a"),C8=r(`ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He`),I8=r("."),G1=l(),Yr=n("p"),U8=r("This provided support is new and experimental as of this writing. While the support for DeepSpeed and PyTorch FSDP is active and we welcome issues around it, we don\u2019t support the FairScale integration anymore since it has been integrated in PyTorch main (see the "),dd=n("a"),N8=r("PyTorch FSDP integration"),z8=r(")"),M1=l(),cd=n("a"),j1=l(),Co=n("h3"),Zr=n("a"),qf=n("span"),h(Ys.$$.fragment),F8=l(),Of=n("span"),L8=r("CUDA Extension Installation Notes"),H1=l(),pd=n("p"),R8=r("As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used."),B1=l(),It=n("p"),W8=r("While all installation issues should be dealt with through the corresponding GitHub Issues of "),Zs=n("a"),G8=r("FairScale"),M8=r(" and "),Ks=n("a"),j8=r("Deepspeed"),H8=r(`, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.`),V1=l(),md=n("p"),B8=r("Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:"),Y1=l(),h(Js.$$.fragment),Z1=l(),hd=n("p"),V8=r("please, read the following notes first."),K1=l(),Ut=n("p"),Y8=r("In these notes we give examples for what to do when "),Cf=n("code"),Z8=r("pytorch"),K8=r(" has been built with CUDA "),If=n("code"),J8=r("10.2"),X8=r(`. If your situation is different remember to adjust the version number to the one you are after.`),J1=l(),Io=n("h4"),Kr=n("a"),Uf=n("span"),h(Xs.$$.fragment),Q8=l(),Nf=n("span"),eP=r("Possible problem #1"),X1=l(),ud=n("p"),tP=r(`While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.`),Q1=l(),Fe=n("p"),oP=r("For example, if you installed "),zf=n("code"),rP=r("pytorch"),aP=r(" with "),Ff=n("code"),nP=r("cudatoolkit==10.2"),sP=r(` in the Python environment, you also need to have CUDA `),Lf=n("code"),iP=r("10.2"),lP=r(" installed system-wide."),eb=l(),Nt=n("p"),dP=r("The exact location may vary from system to system, but "),Rf=n("code"),cP=r("/usr/local/cuda-10.2"),pP=r(` is the most common location on many Unix systems. 
When CUDA is correctly set up and added to the `),Wf=n("code"),mP=r("PATH"),hP=r(` environment variable, one can find the installation location by doing:`),tb=l(),h(Qs.$$.fragment),ob=l(),Jr=n("p"),uP=r(`If you don\u2019t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. For example, if you\u2019re on Ubuntu you may want to search for: `),ei=n("a"),fP=r("ubuntu cuda 10.2 install"),gP=r("."),rb=l(),Uo=n("h4"),Xr=n("a"),Gf=n("span"),h(ti.$$.fragment),_P=l(),Mf=n("span"),vP=r("Possible problem #2"),ab=l(),fd=n("p"),bP=r(`Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. For example you may have:`),nb=l(),h(oi.$$.fragment),sb=l(),zt=n("p"),yP=r("Now, in this situation you need to make sure that your "),jf=n("code"),wP=r("PATH"),TP=r(" and "),Hf=n("code"),EP=r("LD_LIBRARY_PATH"),$P=r(` environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. If you encounter the problem, where the package build fails because it can\u2019t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.`),ib=l(),gd=n("p"),xP=r("First, you may look at their contents:"),lb=l(),h(ri.$$.fragment),db=l(),_d=n("p"),kP=r("so you get an idea of what is inside."),cb=l(),Qr=n("p"),AP=r("It\u2019s possible that "),Bf=n("code"),PP=r("LD_LIBRARY_PATH"),DP=r(" is empty."),pb=l(),rt=n("p"),Vf=n("code"),SP=r("PATH"),qP=r(" lists the locations of where executables can be found and "),Yf=n("code"),OP=r("LD_LIBRARY_PATH"),CP=r(` is for where shared libraries are to looked for. In both cases, earlier entries have priority over the later ones. `),Zf=n("code"),IP=r(":"),UP=r(` is used to separate multiple entries.`),mb=l(),vd=n("p"),NP=r(`Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:`),hb=l(),h(ai.$$.fragment),ub=l(),bd=n("p"),zP=r("Note that we aren\u2019t overwriting the existing values, but prepending instead."),fb=l(),Le=n("p"),FP=r(`Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. `),Kf=n("code"),LP=r("lib64"),RP=r(" sub-directory is where the various CUDA "),Jf=n("code"),WP=r(".so"),GP=r(" objects, like "),Xf=n("code"),MP=r("libcudart.so"),jP=r(` reside, it\u2019s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.`),gb=l(),No=n("h4"),ea=n("a"),Qf=n("span"),h(ni.$$.fragment),HP=l(),eg=n("span"),BP=r("Possible problem #3"),_b=l(),Ft=n("p"),VP=r("Some older CUDA versions may refuse to build with newer compilers. For example, you my have "),tg=n("code"),YP=r("gcc-9"),ZP=r(` but it wants `),og=n("code"),KP=r("gcc-7"),JP=r("."),vb=l(),yd=n("p"),XP=r("There are various ways to go about it."),bb=l(),wd=n("p"),QP=r("If you can install the latest CUDA toolkit it typically should support the newer compiler."),yb=l(),ta=n("p"),eD=r(`Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it\u2019s not the default one, so the build system can\u2019t see it. 
If you have `),rg=n("code"),tD=r("gcc-7"),oD=r(` installed but the build system complains it can\u2019t find it, the following might do the trick:`),wb=l(),h(si.$$.fragment),Tb=l(),M=n("p"),rD=r("Here, we are making a symlink to "),ag=n("code"),aD=r("gcc-7"),nD=r(" from "),ng=n("code"),sD=r("/usr/local/cuda-10.2/bin/gcc"),iD=r(` and since `),sg=n("code"),lD=r("/usr/local/cuda-10.2/bin/"),dD=r(" should be in the "),ig=n("code"),cD=r("PATH"),pD=r(` environment variable (see the previous problem\u2019s solution), it should find `),lg=n("code"),mD=r("gcc-7"),hD=r(" (and "),dg=n("code"),uD=r("g++7"),fD=r(") and then the build will succeed."),Eb=l(),Td=n("p"),gD=r("As always make sure to edit the paths in the example to match your situation."),$b=l(),zo=n("h3"),oa=n("a"),cg=n("span"),h(ii.$$.fragment),_D=l(),pg=n("span"),vD=r("FairScale"),xb=l(),h(ra.$$.fragment),kb=l(),Re=n("p"),bD=r("By integrating "),li=n("a"),yD=r("FairScale"),wD=r(" the "),Ed=n("a"),TD=r("Trainer"),ED=r(` provides support for the following features from `),di=n("a"),$D=r("the ZeRO paper"),xD=r(":"),Ab=l(),We=n("ol"),mg=n("li"),kD=r("Optimizer State Sharding"),AD=l(),hg=n("li"),PD=r("Gradient Sharding"),DD=l(),ug=n("li"),SD=r("Model Parameters Sharding (new and very experimental)"),qD=l(),fg=n("li"),OD=r("CPU offload (new and very experimental)"),Pb=l(),$d=n("p"),CD=r("You will need at least two GPUs to use this feature."),Db=l(),ci=n("p"),gg=n("strong"),ID=r("Installation"),UD=r(":"),Sb=l(),xd=n("p"),ND=r("Install the library via pypi:"),qb=l(),h(pi.$$.fragment),Ob=l(),Lt=n("p"),zD=r("or via "),_g=n("code"),FD=r("transformers"),LD=r("\u2019 "),vg=n("code"),RD=r("extras"),WD=r(":"),Cb=l(),h(mi.$$.fragment),Ib=l(),Rt=n("p"),GD=r("(available starting from "),bg=n("code"),MD=r("transformers==4.6.0"),jD=r(") or find more details on "),hi=n("a"),HD=r("the FairScale\u2019s GitHub page"),BD=r("."),Ub=l(),aa=n("p"),VD=r("If you\u2019re still struggling with the build, first make sure to read "),kd=n("a"),YD=r("CUDA Extension Installation Notes"),ZD=r("."),Nb=l(),Ad=n("p"),KD=r("If it\u2019s still not resolved the build issue, here are a few more ideas."),zb=l(),ui=n("p"),yg=n("code"),JD=r("fairscale"),XD=r(` seems to have an issue with the recently introduced by pip build isolation feature. 
If you have a problem with it, you may want to try one of:`),Fb=l(),h(fi.$$.fragment),Lb=l(),Pd=n("p"),QD=r("or:"),Rb=l(),h(gi.$$.fragment),Wb=l(),_i=n("p"),wg=n("code"),eS=r("fairscale"),tS=r(" also has issues with building against pytorch-nightly, so if you use it you may have to try one of:"),Gb=l(),h(vi.$$.fragment),Mb=l(),Dd=n("p"),oS=r("or:"),jb=l(),h(bi.$$.fragment),Hb=l(),Sd=n("p"),rS=r("Of course, adjust the urls to match the cuda version you use."),Bb=l(),na=n("p"),aS=r(`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),yi=n("a"),nS=r("FairScale"),sS=r("."),Vb=l(),wi=n("p"),Tg=n("strong"),iS=r("Usage"),lS=r(":"),Yb=l(),Wt=n("p"),dS=r("To use the first version of Sharded data-parallelism, add "),Eg=n("code"),cS=r("--sharded_ddp simple"),pS=r(` to the command line arguments, and make sure you have added the distributed launcher `),$g=n("code"),mS=r("-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),hS=r(" if you haven\u2019t been using it already."),Zb=l(),sa=n("p"),uS=r("For example here is how you could use it for "),xg=n("code"),fS=r("run_translation.py"),gS=r(" with 2 GPUs:"),Kb=l(),h(Ti.$$.fragment),Jb=l(),qd=n("p"),_S=r("Notes:"),Xb=l(),Ge=n("ul"),kg=n("li"),vS=r("This feature requires distributed training (so multiple GPUs)."),bS=l(),Ag=n("li"),yS=r("It is not implemented for TPUs."),wS=l(),Ei=n("li"),TS=r("It works with "),Pg=n("code"),ES=r("--fp16"),$S=r(" too, to make things even faster."),xS=l(),$i=n("li"),kS=r("One of the main benefits of enabling "),Dg=n("code"),AS=r("--sharded_ddp simple"),PS=r(` is that it uses a lot less GPU memory, so you should be able to use significantly larger batch sizes using the same hardware (e.g. 3x and even bigger) which should lead to significantly shorter training time.`),Qb=l(),xi=n("ol"),at=n("li"),DS=r("To use the second version of Sharded data-parallelism, add "),Sg=n("code"),SS=r("--sharded_ddp zero_dp_2"),qS=r(" or "),qg=n("code"),OS=r("--sharded_ddp zero_dp_3"),CS=r(" to the command line arguments, and make sure you have added the distributed launcher "),Og=n("code"),IS=r("-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),US=r(" if you haven\u2019t been using it already."),ey=l(),ia=n("p"),NS=r("For example here is how you could use it for "),Cg=n("code"),zS=r("run_translation.py"),FS=r(" with 2 GPUs:"),ty=l(),h(ki.$$.fragment),oy=l(),Fo=n("p"),Ig=n("code"),LS=r("zero_dp_2"),RS=r(" is an optimized version of the simple wrapper, while "),Ug=n("code"),WS=r("zero_dp_3"),GS=r(` fully shards model weights, gradients and optimizer states.`),ry=l(),Gt=n("p"),MS=r("Both are compatible with adding "),Ng=n("code"),jS=r("cpu_offload"),HS=r(" to enable ZeRO-offload (activate it like this: "),zg=n("code"),BS=r('--sharded_ddp "zero_dp_2 cpu_offload"'),VS=r(")."),ay=l(),Od=n("p"),YS=r("Notes:"),ny=l(),he=n("ul"),Fg=n("li"),ZS=r("This feature requires distributed training (so multiple GPUs)."),KS=l(),Lg=n("li"),JS=r("It is not implemented for TPUs."),XS=l(),Ai=n("li"),QS=r("It works with "),Rg=n("code"),eq=r("--fp16"),tq=r(" too, to make things even faster."),oq=l(),Lo=n("li"),rq=r("The "),Wg=n("code"),aq=r("cpu_offload"),nq=r(" additional option requires "),Gg=n("code"),sq=r("--fp16"),iq=r("."),lq=l(),Mg=n("li"),dq=r(`This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.`),sy=l(),Cd=n("p"),cq=r("Known 
caveats:"),iy=l(),la=n("ul"),Ro=n("li"),pq=r("This feature is incompatible with "),jg=n("code"),mq=r("--predict_with_generate"),hq=r(" in the "),Hg=n("em"),uq=r("run_translation.py"),fq=r(" script."),gq=l(),$e=n("li"),_q=r("Using "),Bg=n("code"),vq=r("--sharded_ddp zero_dp_3"),bq=r(` requires wrapping each layer of the model in the special container `),Vg=n("code"),yq=r("FullyShardedDataParallelism"),wq=r(" of fairscale. It should be used with the option "),Yg=n("code"),Tq=r("auto_wrap"),Eq=r(` if you are not doing this yourself: `),Zg=n("code"),$q=r('--sharded_ddp "zero_dp_3 auto_wrap"'),xq=r("."),ly=l(),Wo=n("h3"),da=n("a"),Kg=n("span"),h(Pi.$$.fragment),kq=l(),Jg=n("span"),Aq=r("PyTorch Fully Sharded Data parallel"),dy=l(),ca=n("p"),Pq=r(`To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the `),Di=n("a"),Dq=r("Fully Sharded Data Parallel blog"),Sq=r(`. We have integrated the latest PyTorch\u2019s Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config.`),cy=l(),Si=n("p"),Xg=n("strong"),qq=r("Required PyTorch version for FSDP support"),Oq=r(`: PyTorch Nightly (or 1.12.0 if you read this after it has been released) as the model saving with FSDP activated is only available with recent fixes.`),py=l(),qi=n("p"),Qg=n("strong"),Cq=r("Usage"),Iq=r(":"),my=l(),te=n("ul"),e_=n("li"),Oi=n("p"),Uq=r(`Make sure you have added the distributed launcher `),t_=n("code"),Nq=r("-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),zq=r(" if you haven\u2019t been using it already."),Fq=l(),Ci=n("li"),Id=n("p"),o_=n("strong"),Lq=r("Sharding Strategy"),Rq=r(":"),Wq=l(),Go=n("ul"),Ii=n("li"),Gq=r(`FULL_SHARD : Shards optimizer states + gradients + model parameters across data parallel workers/GPUs. For this, add `),r_=n("code"),Mq=r("--fsdp full_shard"),jq=r(" to the command line arguments."),Hq=l(),Ui=n("li"),Bq=r(`SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs. For this, add `),a_=n("code"),Vq=r("--fsdp shard_grad_op"),Yq=r(" to the command line arguments."),Zq=l(),Ni=n("li"),Kq=r("NO_SHARD : No sharding. 
For this, add "),n_=n("code"),Jq=r("--fsdp no_shard"),Xq=r(" to the command line arguments."),Qq=l(),s_=n("li"),Mo=n("p"),e7=r(`To offload the parameters and gradients to the CPU, add `),i_=n("code"),t7=r('--fsdp "full_shard offload"'),o7=r(" or "),l_=n("code"),r7=r('--fsdp "shard_grad_op offload"'),a7=r(" to the command line arguments."),n7=l(),d_=n("li"),nt=n("p"),s7=r("To automatically recursively wrap layers with FSDP using "),c_=n("code"),i7=r("default_auto_wrap_policy"),l7=r(`, add `),p_=n("code"),d7=r('--fsdp "full_shard auto_wrap"'),c7=r(" or "),m_=n("code"),p7=r('--fsdp "shard_grad_op auto_wrap"'),m7=r(" to the command line arguments."),h7=l(),h_=n("li"),jo=n("p"),u7=r(`To enable both CPU offloading and auto wrapping, add `),u_=n("code"),f7=r('--fsdp "full_shard offload auto_wrap"'),g7=r(" or "),f_=n("code"),_7=r('--fsdp "shard_grad_op offload auto_wrap"'),v7=r(" to the command line arguments."),b7=l(),zi=n("li"),g_=n("p"),y7=r("If auto wrapping is enabled, you can either use transformer based auto wrap policy or size based auto wrap policy."),w7=l(),Fi=n("ul"),xe=n("li"),T7=r("For transformer based auto wrap policy, please add "),__=n("code"),E7=r("--fsdp_transformer_layer_cls_to_wrap <value>"),$7=r(` to command line arguments. This specifies the transformer layer class name (case-sensitive) to wrap ,e.g, `),v_=n("code"),x7=r("BertLayer"),k7=r(", "),b_=n("code"),A7=r("GPTJBlock"),P7=r(", "),y_=n("code"),D7=r("T5Block"),S7=r(` \u2026 This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers. Remaining layers including the shared embeddings are conviniently wrapped in same outermost FSDP unit. Therefore, use this for transformer based models.`),q7=l(),Li=n("li"),O7=r("For size based auto wrap policy, please add "),w_=n("code"),C7=r("--fsdp_min_num_params <number>"),I7=r(` to command line arguments. It specifies FSDP\u2019s minimum number of parameters for auto wrapping.`),hy=l(),Ud=n("p"),T_=n("strong"),U7=r("Few caveats to be aware of"),uy=l(),pa=n("ul"),Ri=n("li"),N7=r(`Mixed precision is currently not supported with FSDP as we wait for PyTorch to fix support for it. More details in this `),Wi=n("a"),z7=r("issues"),F7=r("."),L7=l(),Ho=n("li"),R7=r(`FSDP currently doesn\u2019t support multiple parameter groups. More details mentioned in this `),Gi=n("a"),W7=r("issue"),G7=r(` (`),E_=n("code"),M7=r("The original model parameters' .grads are not set, meaning that they cannot be optimized separately (which is why we cannot support multiple parameter groups)"),j7=r(")."),fy=l(),Bo=n("h3"),ma=n("a"),$_=n("span"),h(Mi.$$.fragment),H7=l(),x_=n("span"),B7=r("Using Trainer for accelerated PyTorch Training on Mac"),gy=l(),Me=n("p"),V7=r(`With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac. Apple\u2019s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `),k_=n("code"),Y7=r('"mps"'),Z7=r(` device. This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS. 
For more information please refer official documents `),ji=n("a"),K7=r("Introducing Accelerated PyTorch Training on Mac"),J7=r(` and `),Hi=n("a"),X7=r("MPS BACKEND"),Q7=r("."),_y=l(),h(ha.$$.fragment),vy=l(),Nd=n("p"),A_=n("strong"),eO=r("Benefits of Training and Inference using Apple Silicon Chips"),by=l(),Mt=n("ol"),P_=n("li"),tO=r("Enables users to train larger networks or batch sizes locally"),oO=l(),D_=n("li"),rO=r(`Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. Therefore, improving end-to-end performance.`),aO=l(),S_=n("li"),nO=r("Reduces costs associated with cloud-based development or the need for additional local GPUs."),yy=l(),Vo=n("p"),q_=n("strong"),sO=r("Pre-requisites"),iO=r(`: To install torch with mps support, please follow this nice medium article `),Bi=n("a"),lO=r("GPU-Acceleration Comes to PyTorch on M1 Macs"),dO=r("."),wy=l(),Yo=n("p"),O_=n("strong"),cO=r("Usage"),pO=r(`: User has to just pass `),C_=n("code"),mO=r("--use_mps_device"),hO=r(` argument. For example, you can run the offical Glue text classififcation task (from the root folder) using Apple Silicon GPU with below command:`),Ty=l(),h(Vi.$$.fragment),Ey=l(),zd=n("p"),I_=n("strong"),uO=r("A few caveats to be aware of"),$y=l(),ua=n("ol"),Yi=n("li"),fO=r(`Some PyTorch operations have not been implemented in mps and will throw an error. One way to get around that is to set the environment variable `),U_=n("code"),gO=r("PYTORCH_ENABLE_MPS_FALLBACK=1"),_O=r(`, which will fallback to CPU for these operations. It still throws a UserWarning however.`),vO=l(),ke=n("li"),bO=r("Distributed setups "),N_=n("code"),yO=r("gloo"),wO=r(" and "),z_=n("code"),TO=r("nccl"),EO=r(" are not working with "),F_=n("code"),$O=r("mps"),xO=r(` device. 
This means that currently only single GPU of `),L_=n("code"),kO=r("mps"),AO=r(" device type can be used."),xy=l(),jt=n("p"),PO=r("Finally, please, remember that, \u{1F917} "),R_=n("code"),DO=r("Trainer"),SO=r(` only integrates MPS backend, therefore if you have any problems or questions with regards to MPS backend usage, please, file an issue with `),Zi=n("a"),qO=r("PyTorch GitHub"),OO=r("."),ky=l(),Fd=n("p"),CO=r("Sections that were moved:"),Ay=l(),w=n("p"),IO=r("[ "),Ld=n("a"),UO=r("DeepSpeed"),W_=n("a"),NO=r(` | `),Rd=n("a"),zO=r("Installation"),G_=n("a"),FO=r(` | `),Wd=n("a"),LO=r("Deployment with multiple GPUs"),M_=n("a"),RO=r(` | `),Gd=n("a"),WO=r("Deployment with one GPU"),j_=n("a"),GO=r(` | `),Md=n("a"),MO=r("Deployment in Notebooks"),H_=n("a"),jO=r(` | `),jd=n("a"),HO=r("Configuration"),B_=n("a"),BO=r(` | `),Hd=n("a"),VO=r("Passing Configuration"),V_=n("a"),YO=r(` | `),Bd=n("a"),ZO=r("Shared Configuration"),Y_=n("a"),KO=r(` | `),Vd=n("a"),JO=r("ZeRO"),Z_=n("a"),XO=r(` | `),Yd=n("a"),QO=r("ZeRO-2 Config"),K_=n("a"),eC=r(` | `),Zd=n("a"),tC=r("ZeRO-3 Config"),J_=n("a"),oC=r(` | `),Kd=n("a"),rC=r("NVMe Support"),X_=n("a"),aC=r(` | `),Jd=n("a"),nC=r("ZeRO-2 vs ZeRO-3 Performance"),Q_=n("a"),sC=r(` | `),Xd=n("a"),iC=r("ZeRO-2 Example"),ev=n("a"),lC=r(` | `),Qd=n("a"),dC=r("ZeRO-3 Example"),tv=n("a"),cC=r(` | `),ec=n("a"),pC=r("Optimizer"),ov=n("a"),mC=r(` | `),tc=n("a"),hC=r("Scheduler"),rv=n("a"),uC=r(` | `),oc=n("a"),fC=r("fp32 Precision"),av=n("a"),gC=r(` | `),rc=n("a"),_C=r("Automatic Mixed Precision"),nv=n("a"),vC=r(` | `),ac=n("a"),bC=r("Batch Size"),sv=n("a"),yC=r(` | `),nc=n("a"),wC=r("Gradient Accumulation"),iv=n("a"),TC=r(` | `),sc=n("a"),EC=r("Gradient Clipping"),lv=n("a"),$C=r(` | `),ic=n("a"),xC=r("Getting The Model Weights Out"),dv=n("a"),kC=r(` ]`),this.h()},l(t){const c=rM('[data-svelte="svelte-1phssyn"]',document.head);T=s(c,"META",{name:!0,content:!0}),c.forEach(o),D=d(t),$=s(t,"H1",{class:!0});var Ki=i($);k=s(Ki,"A",{id:!0,class:!0,href:!0});var cv=i(k);L=s(cv,"SPAN",{});var pv=i(L);u(A.$$.fragment,pv),pv.forEach(o),cv.forEach(o),S=d(Ki),W=s(Ki,"SPAN",{});var mv=i(W);fe=a(mv,"Trainer"),mv.forEach(o),Ki.forEach(o),oe=d(t),G=s(t,"P",{});var Zo=i(G);se=a(Zo,"The "),ie=s(Zo,"A",{href:!0});var hv=i(ie);re=a(hv,"Trainer"),hv.forEach(o),le=a(Zo," class provides an API for feature-complete training in PyTorch for most standard use cases. It\u2019s used in most of the "),H=s(Zo,"A",{href:!0,rel:!0});var uv=i(H);Ze=a(uv,"example scripts"),uv.forEach(o),ge=a(Zo,"."),Zo.forEach(o),z=d(t),I=s(t,"P",{});var lc=i(I);st=a(lc,"Before instantiating your "),ae=s(lc,"A",{href:!0});var XC=i(ae);it=a(XC,"Trainer"),XC.forEach(o),lt=a(lc,", create a "),_e=s(lc,"A",{href:!0});var QC=i(_e);Ia=a(QC,"TrainingArguments"),QC.forEach(o),Ua=a(lc," to access all the points of customization during training."),lc.forEach(o),Ke=d(t),Pe=s(t,"P",{});var Dy=i(Pe);Na=a(Dy,"The API supports distributed training on multiple GPUs/TPUs, mixed precision through "),ve=s(Dy,"A",{href:!0,rel:!0});var eI=i(ve);za=a(eI,"NVIDIA Apex"),eI.forEach(o),Fa=a(Dy," and Native AMP for PyTorch."),Dy.forEach(o),K=d(t),B=s(t,"P",{});var Sy=i(B);el=a(Sy,"The "),be=s(Sy,"A",{href:!0});var tI=i(be);Xo=a(tI,"Trainer"),tI.forEach(o),tl=a(Sy," contains the basic training loop which supports the above features. 
To inject custom behavior you can subclass them and override the following methods:"),Sy.forEach(o),ro=d(t),C=s(t,"UL",{});var N=i(C);V=s(N,"LI",{});var AC=i(V);Qo=s(AC,"STRONG",{});var oI=i(Qo);ol=a(oI,"get_train_dataloader"),oI.forEach(o),rl=a(AC," \u2014 Creates the training DataLoader."),AC.forEach(o),al=d(N),nl=s(N,"LI",{});var PC=i(nl);_p=s(PC,"STRONG",{});var rI=i(_p);Aw=a(rI,"get_eval_dataloader"),rI.forEach(o),Pw=a(PC," \u2014 Creates the evaluation DataLoader."),PC.forEach(o),Dw=d(N),sl=s(N,"LI",{});var DC=i(sl);vp=s(DC,"STRONG",{});var aI=i(vp);Sw=a(aI,"get_test_dataloader"),aI.forEach(o),qw=a(DC," \u2014 Creates the test DataLoader."),DC.forEach(o),Ow=d(N),il=s(N,"LI",{});var SC=i(il);bp=s(SC,"STRONG",{});var nI=i(bp);Cw=a(nI,"log"),nI.forEach(o),Iw=a(SC," \u2014 Logs information on the various objects watching training."),SC.forEach(o),Uw=d(N),dt=s(N,"LI",{});var Ji=i(dt);yp=s(Ji,"STRONG",{});var sI=i(yp);Nw=a(sI,"create_optimizer_and_scheduler"),sI.forEach(o),zw=a(Ji,` \u2014 Sets up the optimizer and learning rate scheduler if they were not passed at init. Note, that you can also subclass or override the `),wp=s(Ji,"CODE",{});var iI=i(wp);Fw=a(iI,"create_optimizer"),iI.forEach(o),Lw=a(Ji," and "),Tp=s(Ji,"CODE",{});var lI=i(Tp);Rw=a(lI,"create_scheduler"),lI.forEach(o),Ww=a(Ji,` methods separately.`),Ji.forEach(o),Gw=d(N),ll=s(N,"LI",{});var qC=i(ll);Ep=s(qC,"STRONG",{});var dI=i(Ep);Mw=a(dI,"create_optimizer"),dI.forEach(o),jw=a(qC," \u2014 Sets up the optimizer if it wasn\u2019t passed at init."),qC.forEach(o),Hw=d(N),dl=s(N,"LI",{});var OC=i(dl);$p=s(OC,"STRONG",{});var cI=i($p);Bw=a(cI,"create_scheduler"),cI.forEach(o),Vw=a(OC," \u2014 Sets up the learning rate scheduler if it wasn\u2019t passed at init."),OC.forEach(o),Yw=d(N),cl=s(N,"LI",{});var CC=i(cl);xp=s(CC,"STRONG",{});var pI=i(xp);Zw=a(pI,"compute_loss"),pI.forEach(o),Kw=a(CC," - Computes the loss on a batch of training inputs."),CC.forEach(o),Jw=d(N),pl=s(N,"LI",{});var IC=i(pl);kp=s(IC,"STRONG",{});var mI=i(kp);Xw=a(mI,"training_step"),mI.forEach(o),Qw=a(IC," \u2014 Performs a training step."),IC.forEach(o),e0=d(N),ml=s(N,"LI",{});var UC=i(ml);Ap=s(UC,"STRONG",{});var hI=i(Ap);t0=a(hI,"prediction_step"),hI.forEach(o),o0=a(UC," \u2014 Performs an evaluation/test step."),UC.forEach(o),r0=d(N),hl=s(N,"LI",{});var NC=i(hl);Pp=s(NC,"STRONG",{});var uI=i(Pp);a0=a(uI,"evaluate"),uI.forEach(o),n0=a(NC," \u2014 Runs an evaluation loop and returns metrics."),NC.forEach(o),s0=d(N),ul=s(N,"LI",{});var zC=i(ul);Dp=s(zC,"STRONG",{});var fI=i(Dp);i0=a(fI,"predict"),fI.forEach(o),l0=a(zC," \u2014 Returns predictions (with metrics if labels are available) on a test set."),zC.forEach(o),N.forEach(o),yv=d(t),u(er.$$.fragment,t),wv=d(t),tr=s(t,"P",{});var qy=i(tr);d0=a(qy,"Here is an example of how to customize "),fl=s(qy,"A",{href:!0});var gI=i(fl);c0=a(gI,"Trainer"),gI.forEach(o),p0=a(qy," to use a weighted loss (useful when you have an unbalanced training set):"),qy.forEach(o),Tv=d(t),u(La.$$.fragment,t),Ev=d(t),ct=s(t,"P",{});var dc=i(ct);m0=a(dc,"Another way to customize the training loop behavior for the PyTorch "),gl=s(dc,"A",{href:!0});var _I=i(gl);h0=a(_I,"Trainer"),_I.forEach(o),u0=a(dc," is to use "),_l=s(dc,"A",{href:!0});var vI=i(_l);f0=a(vI,"callbacks"),vI.forEach(o),g0=a(dc," that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping)."),dc.forEach(o),$v=d(t),ao=s(t,"H2",{class:!0});var 
Oy=i(ao);or=s(Oy,"A",{id:!0,class:!0,href:!0});var bI=i(or);Sp=s(bI,"SPAN",{});var yI=i(Sp);u(Ra.$$.fragment,yI),yI.forEach(o),bI.forEach(o),_0=d(Oy),qp=s(Oy,"SPAN",{});var wI=i(qp);v0=a(wI,"Trainer"),wI.forEach(o),Oy.forEach(o),xv=d(t),b=s(t,"DIV",{class:!0});var y=i(b);u(Wa.$$.fragment,y),b0=d(y),Op=s(y,"P",{});var TI=i(Op);y0=a(TI,"Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for \u{1F917} Transformers."),TI.forEach(o),w0=d(y),Cp=s(y,"P",{});var EI=i(Cp);T0=a(EI,"Important attributes:"),EI.forEach(o),E0=d(y),ye=s(y,"UL",{});var Ht=i(ye);rr=s(Ht,"LI",{});var fv=i(rr);Ip=s(fv,"STRONG",{});var $I=i(Ip);$0=a($I,"model"),$I.forEach(o),x0=a(fv," \u2014 Always points to the core model. If using a transformers model, it will be a "),vl=s(fv,"A",{href:!0});var xI=i(vl);k0=a(xI,"PreTrainedModel"),xI.forEach(o),A0=a(fv,` subclass.`),fv.forEach(o),P0=d(Ht),J=s(Ht,"LI",{});var Ae=i(J);Up=s(Ae,"STRONG",{});var kI=i(Up);D0=a(kI,"model_wrapped"),kI.forEach(o),S0=a(Ae,` \u2014 Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `),Np=s(Ae,"CODE",{});var AI=i(Np);q0=a(AI,"DeepSpeed"),AI.forEach(o),O0=a(Ae,`, the inner model is wrapped in `),zp=s(Ae,"CODE",{});var PI=i(zp);C0=a(PI,"DeepSpeed"),PI.forEach(o),I0=a(Ae," and then again in "),Fp=s(Ae,"CODE",{});var DI=i(Fp);U0=a(DI,"torch.nn.DistributedDataParallel"),DI.forEach(o),N0=a(Ae,`. If the inner model hasn\u2019t been wrapped, then `),Lp=s(Ae,"CODE",{});var SI=i(Lp);z0=a(SI,"self.model_wrapped"),SI.forEach(o),F0=a(Ae," is the same as "),Rp=s(Ae,"CODE",{});var qI=i(Rp);L0=a(qI,"self.model"),qI.forEach(o),R0=a(Ae,"."),Ae.forEach(o),W0=d(Ht),bl=s(Ht,"LI",{});var FC=i(bl);Wp=s(FC,"STRONG",{});var OI=i(Wp);G0=a(OI,"is_model_parallel"),OI.forEach(o),M0=a(FC,` \u2014 Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).`),FC.forEach(o),j0=d(Ht),De=s(Ht,"LI",{});var Ko=i(De);Gp=s(Ko,"STRONG",{});var CI=i(Gp);H0=a(CI,"place_model_on_device"),CI.forEach(o),B0=a(Ko,` \u2014 Whether or not to automatically place the model on the device - it will be set to `),Mp=s(Ko,"CODE",{});var II=i(Mp);V0=a(II,"False"),II.forEach(o),Y0=a(Ko,` if model parallel or deepspeed is used, or if the default `),jp=s(Ko,"CODE",{});var UI=i(jp);Z0=a(UI,"TrainingArguments.place_model_on_device"),UI.forEach(o),K0=a(Ko," is overridden to return "),Hp=s(Ko,"CODE",{});var NI=i(Hp);J0=a(NI,"False"),NI.forEach(o),X0=a(Ko," ."),Ko.forEach(o),Q0=d(Ht),Se=s(Ht,"LI",{});var Jo=i(Se);Bp=s(Jo,"STRONG",{});var zI=i(Bp);eT=a(zI,"is_in_train"),zI.forEach(o),tT=a(Jo," \u2014 Whether or not a model is currently running "),Vp=s(Jo,"CODE",{});var FI=i(Vp);oT=a(FI,"train"),FI.forEach(o),rT=a(Jo," (e.g. 
when "),Yp=s(Jo,"CODE",{});var LI=i(Yp);aT=a(LI,"evaluate"),LI.forEach(o),nT=a(Jo,` is called while in `),Zp=s(Jo,"CODE",{});var RI=i(Zp);sT=a(RI,"train"),RI.forEach(o),iT=a(Jo,")"),Jo.forEach(o),Ht.forEach(o),lT=d(y),ar=s(y,"DIV",{class:!0});var Cy=i(ar);u(Ga.$$.fragment,Cy),dT=d(Cy),Ma=s(Cy,"P",{});var Iy=i(Ma);cT=a(Iy,"Add a callback to the current list of "),Kp=s(Iy,"CODE",{});var WI=i(Kp);pT=a(WI,"~transformer.TrainerCallback"),WI.forEach(o),mT=a(Iy,"."),Iy.forEach(o),Cy.forEach(o),hT=d(y),nr=s(y,"DIV",{class:!0});var Uy=i(nr);u(ja.$$.fragment,Uy),uT=d(Uy),Ha=s(Uy,"P",{});var Ny=i(Ha);fT=a(Ny,"A helper wrapper that creates an appropriate context manager for "),Jp=s(Ny,"CODE",{});var GI=i(Jp);gT=a(GI,"autocast"),GI.forEach(o),_T=a(Ny,` while feeding it the desired arguments, depending on the situation.`),Ny.forEach(o),Uy.forEach(o),vT=d(y),pt=s(y,"DIV",{class:!0});var cc=i(pt);u(Ba.$$.fragment,cc),bT=d(cc),Xp=s(cc,"P",{});var MI=i(Xp);yT=a(MI,"How the loss is computed by Trainer. By default, all models return the loss in the first element."),MI.forEach(o),wT=d(cc),Qp=s(cc,"P",{});var jI=i(Qp);TT=a(jI,"Subclass and override for custom behavior."),jI.forEach(o),cc.forEach(o),ET=d(y),sr=s(y,"DIV",{class:!0});var zy=i(sr);u(Va.$$.fragment,zy),$T=d(zy),em=s(zy,"P",{});var HI=i(em);xT=a(HI,"A helper wrapper to group together context managers."),HI.forEach(o),zy.forEach(o),kT=d(y),ir=s(y,"DIV",{class:!0});var Fy=i(ir);u(Ya.$$.fragment,Fy),AT=d(Fy),Za=s(Fy,"P",{});var Ly=i(Za);PT=a(Ly,"Creates a draft of a model card using the information available to the "),tm=s(Ly,"CODE",{});var BI=i(tm);DT=a(BI,"Trainer"),BI.forEach(o),ST=a(Ly,"."),Ly.forEach(o),Fy.forEach(o),qT=d(y),mt=s(y,"DIV",{class:!0});var pc=i(mt);u(Ka.$$.fragment,pc),OT=d(pc),om=s(pc,"P",{});var VI=i(om);CT=a(VI,"Setup the optimizer."),VI.forEach(o),IT=d(pc),Ja=s(pc,"P",{});var Ry=i(Ja);UT=a(Ry,`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),rm=s(Ry,"CODE",{});var YI=i(rm);NT=a(YI,"optimizers"),YI.forEach(o),zT=a(Ry,", or subclass and override this method in a subclass."),Ry.forEach(o),pc.forEach(o),FT=d(y),ht=s(y,"DIV",{class:!0});var mc=i(ht);u(Xa.$$.fragment,mc),LT=d(mc),am=s(mc,"P",{});var ZI=i(am);RT=a(ZI,"Setup the optimizer and the learning rate scheduler."),ZI.forEach(o),WT=d(mc),Je=s(mc,"P",{});var fa=i(Je);GT=a(fa,`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),nm=s(fa,"CODE",{});var KI=i(nm);MT=a(KI,"optimizers"),KI.forEach(o),jT=a(fa,", or subclass and override this method (or "),sm=s(fa,"CODE",{});var JI=i(sm);HT=a(JI,"create_optimizer"),JI.forEach(o),BT=a(fa,` and/or `),im=s(fa,"CODE",{});var XI=i(im);VT=a(XI,"create_scheduler"),XI.forEach(o),YT=a(fa,") in a subclass."),fa.forEach(o),mc.forEach(o),ZT=d(y),lr=s(y,"DIV",{class:!0});var Wy=i(lr);u(Qa.$$.fragment,Wy),KT=d(Wy),lm=s(Wy,"P",{});var QI=i(lm);JT=a(QI,`Setup the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or passed as an argument.`),QI.forEach(o),Wy.forEach(o),XT=d(y),qe=s(y,"DIV",{class:!0});var ga=i(qe);u(en.$$.fragment,ga),QT=d(ga),dm=s(ga,"P",{});var eU=i(dm);e4=a(eU,"Run evaluation and returns metrics."),eU.forEach(o),t4=d(ga),tn=s(ga,"P",{});var Gy=i(tn);o4=a(Gy,`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),cm=s(Gy,"CODE",{});var tU=i(cm);r4=a(tU,"compute_metrics"),tU.forEach(o),a4=a(Gy," argument)."),Gy.forEach(o),n4=d(ga),pm=s(ga,"P",{});var oU=i(pm);s4=a(oU,"You can also subclass and override this method to inject custom behavior."),oU.forEach(o),ga.forEach(o),i4=d(y),ut=s(y,"DIV",{class:!0});var hc=i(ut);u(on.$$.fragment,hc),l4=d(hc),no=s(hc,"P",{});var uc=i(no);d4=a(uc,"Prediction/evaluation loop, shared by "),mm=s(uc,"CODE",{});var rU=i(mm);c4=a(rU,"Trainer.evaluate()"),rU.forEach(o),p4=a(uc," and "),hm=s(uc,"CODE",{});var aU=i(hm);m4=a(aU,"Trainer.predict()"),aU.forEach(o),h4=a(uc,"."),uc.forEach(o),u4=d(hc),um=s(hc,"P",{});var nU=i(um);f4=a(nU,"Works both with or without labels."),nU.forEach(o),hc.forEach(o),g4=d(y),dr=s(y,"DIV",{class:!0});var My=i(dr);u(rn.$$.fragment,My),_4=d(My),an=s(My,"P",{});var jy=i(an);v4=a(jy,"For models that inherit from "),yl=s(jy,"A",{href:!0});var sU=i(yl);b4=a(sU,"PreTrainedModel"),sU.forEach(o),y4=a(jy,`, uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method.`),jy.forEach(o),My.forEach(o),w4=d(y),ft=s(y,"DIV",{class:!0});var fc=i(ft);u(nn.$$.fragment,fc),T4=d(fc),sn=s(fc,"P",{});var Hy=i(sn);E4=a(Hy,"Returns the evaluation "),fm=s(Hy,"CODE",{});var iU=i(fm);$4=a(iU,"~torch.utils.data.DataLoader"),iU.forEach(o),x4=a(Hy,"."),Hy.forEach(o),k4=d(fc),gm=s(fc,"P",{});var lU=i(gm);A4=a(lU,"Subclass and override this method if you want to inject some custom behavior."),lU.forEach(o),fc.forEach(o),P4=d(y),cr=s(y,"DIV",{class:!0});var By=i(cr);u(ln.$$.fragment,By),D4=d(By),_m=s(By,"P",{});var dU=i(_m);S4=a(dU,"Returns the optimizer class and optimizer parameters based on the training arguments."),dU.forEach(o),By.forEach(o),q4=d(y),gt=s(y,"DIV",{class:!0});var gc=i(gt);u(dn.$$.fragment,gc),O4=d(gc),cn=s(gc,"P",{});var Vy=i(cn);C4=a(Vy,"Returns the test "),vm=s(Vy,"CODE",{});var cU=i(vm);I4=a(cU,"~torch.utils.data.DataLoader"),cU.forEach(o),U4=a(Vy,"."),Vy.forEach(o),N4=d(gc),bm=s(gc,"P",{});var pU=i(bm);z4=a(pU,"Subclass and override this method if you want to inject some custom behavior."),pU.forEach(o),gc.forEach(o),F4=d(y),Oe=s(y,"DIV",{class:!0});var _a=i(Oe);u(pn.$$.fragment,_a),L4=d(_a),mn=s(_a,"P",{});var Yy=i(mn);R4=a(Yy,"Returns the training "),ym=s(Yy,"CODE",{});var mU=i(ym);W4=a(mU,"~torch.utils.data.DataLoader"),mU.forEach(o),G4=a(Yy,"."),Yy.forEach(o),M4=d(_a),so=s(_a,"P",{});var _c=i(so);j4=a(_c,"Will use no sampler if "),wm=s(_c,"CODE",{});var hU=i(wm);H4=a(hU,"train_dataset"),hU.forEach(o),B4=a(_c," does not implement "),Tm=s(_c,"CODE",{});var uU=i(Tm);V4=a(uU,"__len__"),uU.forEach(o),Y4=a(_c,`, a random sampler (adapted to distributed training if necessary) otherwise.`),_c.forEach(o),Z4=d(_a),Em=s(_a,"P",{});var fU=i(Em);K4=a(fU,"Subclass and override this method if you want to inject some custom behavior."),fU.forEach(o),_a.forEach(o),J4=d(y),_t=s(y,"DIV",{class:!0});var 
vc=i(_t);u(hn.$$.fragment,vc),X4=d(vc),we=s(vc,"P",{});var Bt=i(we);Q4=a(Bt,"Launch an hyperparameter search using "),$m=s(Bt,"CODE",{});var gU=i($m);eE=a(gU,"optuna"),gU.forEach(o),tE=a(Bt," or "),xm=s(Bt,"CODE",{});var _U=i(xm);oE=a(_U,"Ray Tune"),_U.forEach(o),rE=a(Bt," or "),km=s(Bt,"CODE",{});var vU=i(km);aE=a(vU,"SigOpt"),vU.forEach(o),nE=a(Bt,`. The optimized quantity is determined by `),Am=s(Bt,"CODE",{});var bU=i(Am);sE=a(bU,"compute_objective"),bU.forEach(o),iE=a(Bt,`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise.`),Bt.forEach(o),lE=d(vc),u(pr.$$.fragment,vc),vc.forEach(o),dE=d(y),mr=s(y,"DIV",{class:!0});var Zy=i(mr);u(un.$$.fragment,Zy),cE=d(Zy),fn=s(Zy,"P",{});var Ky=i(fn);pE=a(Ky,"Initializes a git repo in "),Pm=s(Ky,"CODE",{});var yU=i(Pm);mE=a(yU,"self.args.hub_model_id"),yU.forEach(o),hE=a(Ky,"."),Ky.forEach(o),Zy.forEach(o),uE=d(y),hr=s(y,"DIV",{class:!0});var Jy=i(hr);u(gn.$$.fragment,Jy),fE=d(Jy),Dm=s(Jy,"P",{});var wU=i(Dm);gE=a(wU,`Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`),wU.forEach(o),Jy.forEach(o),_E=d(y),ur=s(y,"DIV",{class:!0});var Xy=i(ur);u(_n.$$.fragment,Xy),vE=d(Xy),vn=s(Xy,"P",{});var Qy=i(vn);bE=a(Qy,`Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `),Sm=s(Qy,"CODE",{});var TU=i(Sm);yE=a(TU,"True"),TU.forEach(o),wE=a(Qy," for one process)."),Qy.forEach(o),Xy.forEach(o),TE=d(y),vt=s(y,"DIV",{class:!0});var bc=i(vt);u(bn.$$.fragment,bc),EE=d(bc),yn=s(bc,"P",{});var e2=i(yn);$E=a(e2,"Log "),qm=s(e2,"CODE",{});var EU=i(qm);xE=a(EU,"logs"),EU.forEach(o),kE=a(e2," on the various objects watching training."),e2.forEach(o),AE=d(bc),Om=s(bc,"P",{});var $U=i(Om);PE=a($U,"Subclass and override this method to inject custom behavior."),$U.forEach(o),bc.forEach(o),DE=d(y),P=s(y,"DIV",{class:!0});var q=i(P);u(wn.$$.fragment,q),SE=d(q),Cm=s(q,"P",{});var xU=i(Cm);qE=a(xU,"Log metrics in a specially formatted way"),xU.forEach(o),OE=d(q),Im=s(q,"P",{});var kU=i(Im);CE=a(kU,"Under distributed environment this is done only for a process with rank 0."),kU.forEach(o),IE=d(q),Um=s(q,"P",{});var AU=i(Um);UE=a(AU,"Notes on memory reports:"),AU.forEach(o),NE=d(q),io=s(q,"P",{});var yc=i(io);zE=a(yc,"In order to get memory usage report you need to install "),Nm=s(yc,"CODE",{});var PU=i(Nm);FE=a(PU,"psutil"),PU.forEach(o),LE=a(yc,". You can do that with "),zm=s(yc,"CODE",{});var DU=i(zm);RE=a(DU,"pip install psutil"),DU.forEach(o),WE=a(yc,"."),yc.forEach(o),GE=d(q),u(fr.$$.fragment,q),ME=d(q),Fm=s(q,"P",{});var SU=i(Fm);Lm=s(SU,"STRONG",{});var qU=i(Lm);jE=a(qU,"Understanding the reports:"),qU.forEach(o),SU.forEach(o),HE=d(q),Xe=s(q,"UL",{});var va=i(Xe);Te=s(va,"LI",{});var Vt=i(Te);BE=a(Vt,"the first segment, e.g., "),Rm=s(Vt,"CODE",{});var OU=i(Rm);VE=a(OU,"train__"),OU.forEach(o),YE=a(Vt,", tells you which stage the metrics are for. Reports starting with "),Wm=s(Vt,"CODE",{});var CU=i(Wm);ZE=a(CU,"init_"),CU.forEach(o),KE=a(Vt,` will be added to the first stage that gets run. 
So that if only evaluation is run, the memory usage for the `),Gm=s(Vt,"CODE",{});var IU=i(Gm);JE=a(IU,"__init__"),IU.forEach(o),XE=a(Vt," will be reported along with the "),Mm=s(Vt,"CODE",{});var UU=i(Mm);QE=a(UU,"eval_"),UU.forEach(o),e9=a(Vt," metrics."),Vt.forEach(o),t9=d(va),lo=s(va,"LI",{});var wc=i(lo);o9=a(wc,"the third segment, is either "),jm=s(wc,"CODE",{});var NU=i(jm);r9=a(NU,"cpu"),NU.forEach(o),a9=a(wc," or "),Hm=s(wc,"CODE",{});var zU=i(Hm);n9=a(zU,"gpu"),zU.forEach(o),s9=a(wc,`, tells you whether it\u2019s the general RAM or the gpu0 memory metric.`),wc.forEach(o),i9=d(va),wl=s(va,"LI",{});var LC=i(wl);Bm=s(LC,"CODE",{});var FU=i(Bm);l9=a(FU,"*_alloc_delta"),FU.forEach(o),d9=a(LC,` - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated.`),LC.forEach(o),c9=d(va),bt=s(va,"LI",{});var Xi=i(bt);Vm=s(Xi,"CODE",{});var LU=i(Vm);p9=a(LU,"*_peaked_delta"),LU.forEach(o),m9=a(Xi,` - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up `),Ym=s(Xi,"CODE",{});var RU=i(Ym);h9=a(RU,"alloc_delta"),RU.forEach(o),u9=a(Xi,` + `),Zm=s(Xi,"CODE",{});var WU=i(Zm);f9=a(WU,"peaked_delta"),WU.forEach(o),g9=a(Xi," and you know how much memory was needed to complete that stage."),Xi.forEach(o),va.forEach(o),_9=d(q),Km=s(q,"P",{});var GU=i(Km);v9=a(GU,`The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the future these reports will evolve to measure those too.`),GU.forEach(o),b9=d(q),Jm=s(q,"P",{});var MU=i(Jm);y9=a(MU,`The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise.`),MU.forEach(o),w9=d(q),Tn=s(q,"P",{});var t2=i(Tn);T9=a(t2,`The CPU peak memory is measured using a sampling thread. Due to python\u2019s GIL it may miss some of the peak memory if that thread didn\u2019t get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using `),Xm=s(t2,"CODE",{});var jU=i(Xm);E9=a(jU,"tracemalloc"),jU.forEach(o),$9=a(t2,` would have reported the exact peak memory, but it doesn\u2019t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won\u2019t be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage.`),t2.forEach(o),x9=d(q),Qe=s(q,"P",{});var ba=i(Qe);k9=a(ba,"The GPU allocated and peak memory reporting is done with "),Qm=s(ba,"CODE",{});var HU=i(Qm);A9=a(HU,"torch.cuda.memory_allocated()"),HU.forEach(o),P9=a(ba,` and `),eh=s(ba,"CODE",{});var BU=i(eh);D9=a(BU,"torch.cuda.max_memory_allocated()"),BU.forEach(o),S9=a(ba,`. 
This metric reports only \u201Cdeltas\u201D for pytorch-specific allocations, as `),th=s(ba,"CODE",{});var VU=i(th);q9=a(VU,"torch.cuda"),VU.forEach(o),O9=a(ba,` memory management system doesn\u2019t track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.`),ba.forEach(o),C9=d(q),ne=s(q,"P",{});var je=i(ne);I9=a(je,"Note that this tracker doesn\u2019t account for memory allocations outside of "),Tl=s(je,"A",{href:!0});var YU=i(Tl);U9=a(YU,"Trainer"),YU.forEach(o),N9=a(je,"\u2019s "),oh=s(je,"CODE",{});var ZU=i(oh);z9=a(ZU,"__init__"),ZU.forEach(o),F9=a(je,", "),rh=s(je,"CODE",{});var KU=i(rh);L9=a(KU,"train"),KU.forEach(o),R9=a(je,`, `),ah=s(je,"CODE",{});var JU=i(ah);W9=a(JU,"evaluate"),JU.forEach(o),G9=a(je," and "),nh=s(je,"CODE",{});var XU=i(nh);M9=a(XU,"predict"),XU.forEach(o),j9=a(je," calls."),je.forEach(o),H9=d(q),U=s(q,"P",{});var R=i(U);B9=a(R,"Because "),sh=s(R,"CODE",{});var QU=i(sh);V9=a(QU,"evaluation"),QU.forEach(o),Y9=a(R," calls may happen during "),ih=s(R,"CODE",{});var eN=i(ih);Z9=a(eN,"train"),eN.forEach(o),K9=a(R,`, we can\u2019t handle nested invocations because `),lh=s(R,"CODE",{});var tN=i(lh);J9=a(tN,"torch.cuda.max_memory_allocated"),tN.forEach(o),X9=a(R," is a single counter, so if it gets reset by a nested eval call, "),dh=s(R,"CODE",{});var oN=i(dh);Q9=a(oN,"train"),oN.forEach(o),e$=a(R,`\u2019s tracker will report incorrect info. If this `),En=s(R,"A",{href:!0,rel:!0});var rN=i(En);t$=a(rN,"pytorch issue"),rN.forEach(o),o$=a(R,` gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of `),ch=s(R,"CODE",{});var aN=i(ch);r$=a(aN,"train"),aN.forEach(o),a$=a(R,", "),ph=s(R,"CODE",{});var nN=i(ph);n$=a(nN,"evaluate"),nN.forEach(o),s$=a(R," and "),mh=s(R,"CODE",{});var sN=i(mh);i$=a(sN,"predict"),sN.forEach(o),l$=a(R," methods. Which means that if "),hh=s(R,"CODE",{});var iN=i(hh);d$=a(iN,"eval"),iN.forEach(o),c$=a(R," is called during "),uh=s(R,"CODE",{});var lN=i(uh);p$=a(lN,"train"),lN.forEach(o),m$=a(R,`, it\u2019s the latter that will account for its memory usage and that of the former.`),R.forEach(o),h$=d(q),Ee=s(q,"P",{});var Yt=i(Ee);u$=a(Yt,"This also means that if any other tool that is used along the "),El=s(Yt,"A",{href:!0});var dN=i(El);f$=a(dN,"Trainer"),dN.forEach(o),g$=a(Yt,` calls `),fh=s(Yt,"CODE",{});var cN=i(fh);_$=a(cN,"torch.cuda.reset_peak_memory_stats"),cN.forEach(o),v$=a(Yt,", the gpu peak memory stats could be invalid. And the "),$l=s(Yt,"A",{href:!0});var pN=i($l);b$=a(pN,"Trainer"),pN.forEach(o),y$=a(Yt,` will disrupt the normal behavior of any such tools that rely on calling `),gh=s(Yt,"CODE",{});var mN=i(gh);w$=a(mN,"torch.cuda.reset_peak_memory_stats"),mN.forEach(o),T$=a(Yt," themselves."),Yt.forEach(o),E$=d(q),_h=s(q,"P",{});var hN=i(_h);$$=a(hN,"For best performance you may want to consider turning the memory profiling off for production runs."),hN.forEach(o),q.forEach(o),x$=d(y),gr=s(y,"DIV",{class:!0});var o2=i(gr);u($n.$$.fragment,o2),k$=d(o2),vh=s(o2,"P",{});var uN=i(vh);A$=a(uN,"Reformat Trainer metrics values to a human-readable format"),uN.forEach(o),o2.forEach(o),P$=d(y),_r=s(y,"DIV",{class:!0});var r2=i(_r);u(xn.$$.fragment,r2),D$=d(r2),kn=s(r2,"P",{});var a2=i(kn);S$=a(a2,"Helper to get number of samples in a "),bh=s(a2,"CODE",{});var fN=i(bh);q$=a(fN,"~torch.utils.data.DataLoader"),fN.forEach(o),O$=a(a2,` by accessing its dataset. 
When dataloader.dataset does not exist or has no length, estimates as best it can`),a2.forEach(o),r2.forEach(o),C$=d(y),yt=s(y,"DIV",{class:!0});var Tc=i(yt);u(An.$$.fragment,Tc),I$=d(Tc),Pn=s(Tc,"P",{});var n2=i(Pn);U$=a(n2,"Remove a callback from the current list of "),yh=s(n2,"CODE",{});var gN=i(yh);N$=a(gN,"~transformer.TrainerCallback"),gN.forEach(o),z$=a(n2," and returns it."),n2.forEach(o),F$=d(Tc),Dn=s(Tc,"P",{});var s2=i(Dn);L$=a(s2,"If the callback is not found, returns "),wh=s(s2,"CODE",{});var _N=i(wh);R$=a(_N,"None"),_N.forEach(o),W$=a(s2," (and no error is raised)."),s2.forEach(o),Tc.forEach(o),G$=d(y),X=s(y,"DIV",{class:!0});var He=i(X);u(Sn.$$.fragment,He),M$=d(He),Th=s(He,"P",{});var vN=i(Th);j$=a(vN,"Run prediction and returns predictions and potential metrics."),vN.forEach(o),H$=d(He),qn=s(He,"P",{});var i2=i(qn);B$=a(i2,`Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `),Eh=s(i2,"CODE",{});var bN=i(Eh);V$=a(bN,"evaluate()"),bN.forEach(o),Y$=a(i2,"."),i2.forEach(o),Z$=d(He),u(vr.$$.fragment,He),K$=d(He),On=s(He,"P",{});var l2=i(On);J$=a(l2,"Returns: "),$h=s(l2,"EM",{});var yN=i($h);X$=a(yN,"NamedTuple"),yN.forEach(o),Q$=a(l2," A namedtuple with the following keys:"),l2.forEach(o),e3=d(He),co=s(He,"UL",{});var Ec=i(co);po=s(Ec,"LI",{});var $c=i(po);t3=a($c,"predictions ("),xh=s($c,"CODE",{});var wN=i(xh);o3=a(wN,"np.ndarray"),wN.forEach(o),r3=a($c,"): The predictions on "),kh=s($c,"CODE",{});var TN=i(kh);a3=a(TN,"test_dataset"),TN.forEach(o),n3=a($c,"."),$c.forEach(o),s3=d(Ec),mo=s(Ec,"LI",{});var xc=i(mo);i3=a(xc,"label_ids ("),Ah=s(xc,"CODE",{});var EN=i(Ah);l3=a(EN,"np.ndarray"),EN.forEach(o),d3=a(xc,", "),Ph=s(xc,"EM",{});var $N=i(Ph);c3=a($N,"optional"),$N.forEach(o),p3=a(xc,"): The labels (if the dataset contained some)."),xc.forEach(o),m3=d(Ec),ho=s(Ec,"LI",{});var kc=i(ho);h3=a(kc,"metrics ("),Dh=s(kc,"CODE",{});var xN=i(Dh);u3=a(xN,"Dict[str, float]"),xN.forEach(o),f3=a(kc,", "),Sh=s(kc,"EM",{});var kN=i(Sh);g3=a(kN,"optional"),kN.forEach(o),_3=a(kc,`): The potential dictionary of metrics (if the dataset contained labels).`),kc.forEach(o),Ec.forEach(o),He.forEach(o),v3=d(y),wt=s(y,"DIV",{class:!0});var Ac=i(wt);u(Cn.$$.fragment,Ac),b3=d(Ac),uo=s(Ac,"P",{});var Pc=i(uo);y3=a(Pc,"Prediction/evaluation loop, shared by "),qh=s(Pc,"CODE",{});var AN=i(qh);w3=a(AN,"Trainer.evaluate()"),AN.forEach(o),T3=a(Pc," and "),Oh=s(Pc,"CODE",{});var PN=i(Oh);E3=a(PN,"Trainer.predict()"),PN.forEach(o),$3=a(Pc,"."),Pc.forEach(o),x3=d(Ac),Ch=s(Ac,"P",{});var DN=i(Ch);k3=a(DN,"Works both with or without labels."),DN.forEach(o),Ac.forEach(o),A3=d(y),Tt=s(y,"DIV",{class:!0});var Dc=i(Tt);u(In.$$.fragment,Dc),P3=d(Dc),fo=s(Dc,"P",{});var Sc=i(fo);D3=a(Sc,"Perform an evaluation step on "),Ih=s(Sc,"CODE",{});var SN=i(Ih);S3=a(SN,"model"),SN.forEach(o),q3=a(Sc," using "),Uh=s(Sc,"CODE",{});var qN=i(Uh);O3=a(qN,"inputs"),qN.forEach(o),C3=a(Sc,"."),Sc.forEach(o),I3=d(Dc),Nh=s(Dc,"P",{});var ON=i(Nh);U3=a(ON,"Subclass and override to inject custom behavior."),ON.forEach(o),Dc.forEach(o),N3=d(y),br=s(y,"DIV",{class:!0});var d2=i(br);u(Un.$$.fragment,d2),z3=d(d2),et=s(d2,"P",{});var ya=i(et);F3=a(ya,"Upload "),zh=s(ya,"EM",{});var CN=i(zh);L3=a(CN,"self.model"),CN.forEach(o),R3=a(ya," and "),Fh=s(ya,"EM",{});var IN=i(Fh);W3=a(IN,"self.tokenizer"),IN.forEach(o),G3=a(ya," to the \u{1F917} model hub on the repo "),Lh=s(ya,"EM",{});var 
UN=i(Lh);M3=a(UN,"self.args.hub_model_id"),UN.forEach(o),j3=a(ya,"."),ya.forEach(o),d2.forEach(o),H3=d(y),yr=s(y,"DIV",{class:!0});var c2=i(yr);u(Nn.$$.fragment,c2),B3=d(c2),zn=s(c2,"P",{});var p2=i(zn);V3=a(p2,"Remove a callback from the current list of "),Rh=s(p2,"CODE",{});var NN=i(Rh);Y3=a(NN,"~transformer.TrainerCallback"),NN.forEach(o),Z3=a(p2,"."),p2.forEach(o),c2.forEach(o),K3=d(y),Ce=s(y,"DIV",{class:!0});var wa=i(Ce);u(Fn.$$.fragment,wa),J3=d(wa),Ln=s(wa,"P",{});var m2=i(Ln);X3=a(m2,"Save metrics into a json file for that split, e.g. "),Wh=s(m2,"CODE",{});var zN=i(Wh);Q3=a(zN,"train_results.json"),zN.forEach(o),ex=a(m2,"."),m2.forEach(o),tx=d(wa),Gh=s(wa,"P",{});var FN=i(Gh);ox=a(FN,"Under distributed environment this is done only for a process with rank 0."),FN.forEach(o),rx=d(wa),Rn=s(wa,"P",{});var h2=i(Rn);ax=a(h2,"To understand the metrics please read the docstring of "),xl=s(h2,"A",{href:!0});var LN=i(xl);nx=a(LN,"log_metrics()"),LN.forEach(o),sx=a(h2,`. The only difference is that raw unformatted numbers are saved in the current method.`),h2.forEach(o),wa.forEach(o),ix=d(y),Et=s(y,"DIV",{class:!0});var qc=i(Et);u(Wn.$$.fragment,qc),lx=d(qc),Gn=s(qc,"P",{});var u2=i(Gn);dx=a(u2,"Will save the model, so you can reload it using "),Mh=s(u2,"CODE",{});var RN=i(Mh);cx=a(RN,"from_pretrained()"),RN.forEach(o),px=a(u2,"."),u2.forEach(o),mx=d(qc),jh=s(qc,"P",{});var WN=i(jh);hx=a(WN,"Will only save from the main process."),WN.forEach(o),qc.forEach(o),ux=d(y),$t=s(y,"DIV",{class:!0});var Oc=i($t);u(Mn.$$.fragment,Oc),fx=d(Oc),Hh=s(Oc,"P",{});var GN=i(Hh);gx=a(GN,"Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model"),GN.forEach(o),_x=d(Oc),Bh=s(Oc,"P",{});var MN=i(Bh);vx=a(MN,"Under distributed environment this is done only for a process with rank 0."),MN.forEach(o),Oc.forEach(o),bx=d(y),wr=s(y,"DIV",{class:!0});var f2=i(wr);u(jn.$$.fragment,f2),yx=d(f2),Hn=s(f2,"P",{});var g2=i(Hn);wx=a(g2,"A helper wrapper that creates an appropriate context manager for "),Vh=s(g2,"CODE",{});var jN=i(Vh);Tx=a(jN,"torchdynamo"),jN.forEach(o),Ex=a(g2,"."),g2.forEach(o),f2.forEach(o),$x=d(y),Tr=s(y,"DIV",{class:!0});var _2=i(Tr);u(Bn.$$.fragment,_2),xx=d(_2),Yh=s(_2,"P",{});var HN=i(Yh);kx=a(HN,"Main training entry point."),HN.forEach(o),_2.forEach(o),Ax=d(y),xt=s(y,"DIV",{class:!0});var Cc=i(xt);u(Vn.$$.fragment,Cc),Px=d(Cc),Zh=s(Cc,"P",{});var BN=i(Zh);Dx=a(BN,"Perform a training step on a batch of inputs."),BN.forEach(o),Sx=d(Cc),Kh=s(Cc,"P",{});var VN=i(Kh);qx=a(VN,"Subclass and override to inject custom behavior."),VN.forEach(o),Cc.forEach(o),y.forEach(o),kv=d(t),go=s(t,"H2",{class:!0});var v2=i(go);Er=s(v2,"A",{id:!0,class:!0,href:!0});var YN=i(Er);Jh=s(YN,"SPAN",{});var ZN=i(Jh);u(Yn.$$.fragment,ZN),ZN.forEach(o),YN.forEach(o),Ox=d(v2),Xh=s(v2,"SPAN",{});var KN=i(Xh);Cx=a(KN,"Seq2SeqTrainer"),KN.forEach(o),v2.forEach(o),Av=d(t),tt=s(t,"DIV",{class:!0});var Ic=i(tt);u(Zn.$$.fragment,Ic),Ix=d(Ic),Ie=s(Ic,"DIV",{class:!0});var Ta=i(Ie);u(Kn.$$.fragment,Ta),Ux=d(Ta),Qh=s(Ta,"P",{});var JN=i(Qh);Nx=a(JN,"Run evaluation and returns metrics."),JN.forEach(o),zx=d(Ta),Jn=s(Ta,"P",{});var b2=i(Jn);Fx=a(b2,`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),eu=s(b2,"CODE",{});var XN=i(eu);Lx=a(XN,"compute_metrics"),XN.forEach(o),Rx=a(b2," argument)."),b2.forEach(o),Wx=d(Ta),tu=s(Ta,"P",{});var QN=i(tu);Gx=a(QN,"You can also subclass and override this method to inject custom 
behavior."),QN.forEach(o),Ta.forEach(o),Mx=d(Ic),Q=s(Ic,"DIV",{class:!0});var Be=i(Q);u(Xn.$$.fragment,Be),jx=d(Be),ou=s(Be,"P",{});var ez=i(ou);Hx=a(ez,"Run prediction and returns predictions and potential metrics."),ez.forEach(o),Bx=d(Be),Qn=s(Be,"P",{});var y2=i(Qn);Vx=a(y2,`Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `),ru=s(y2,"CODE",{});var tz=i(ru);Yx=a(tz,"evaluate()"),tz.forEach(o),Zx=a(y2,"."),y2.forEach(o),Kx=d(Be),u($r.$$.fragment,Be),Jx=d(Be),es=s(Be,"P",{});var w2=i(es);Xx=a(w2,"Returns: "),au=s(w2,"EM",{});var oz=i(au);Qx=a(oz,"NamedTuple"),oz.forEach(o),ek=a(w2," A namedtuple with the following keys:"),w2.forEach(o),tk=d(Be),_o=s(Be,"UL",{});var Uc=i(_o);vo=s(Uc,"LI",{});var Nc=i(vo);ok=a(Nc,"predictions ("),nu=s(Nc,"CODE",{});var rz=i(nu);rk=a(rz,"np.ndarray"),rz.forEach(o),ak=a(Nc,"): The predictions on "),su=s(Nc,"CODE",{});var az=i(su);nk=a(az,"test_dataset"),az.forEach(o),sk=a(Nc,"."),Nc.forEach(o),ik=d(Uc),bo=s(Uc,"LI",{});var zc=i(bo);lk=a(zc,"label_ids ("),iu=s(zc,"CODE",{});var nz=i(iu);dk=a(nz,"np.ndarray"),nz.forEach(o),ck=a(zc,", "),lu=s(zc,"EM",{});var sz=i(lu);pk=a(sz,"optional"),sz.forEach(o),mk=a(zc,"): The labels (if the dataset contained some)."),zc.forEach(o),hk=d(Uc),yo=s(Uc,"LI",{});var Fc=i(yo);uk=a(Fc,"metrics ("),du=s(Fc,"CODE",{});var iz=i(du);fk=a(iz,"Dict[str, float]"),iz.forEach(o),gk=a(Fc,", "),cu=s(Fc,"EM",{});var lz=i(cu);_k=a(lz,"optional"),lz.forEach(o),vk=a(Fc,`): The potential dictionary of metrics (if the dataset contained labels).`),Fc.forEach(o),Uc.forEach(o),Be.forEach(o),Ic.forEach(o),Pv=d(t),wo=s(t,"H2",{class:!0});var T2=i(wo);xr=s(T2,"A",{id:!0,class:!0,href:!0});var dz=i(xr);pu=s(dz,"SPAN",{});var cz=i(pu);u(ts.$$.fragment,cz),cz.forEach(o),dz.forEach(o),bk=d(T2),mu=s(T2,"SPAN",{});var pz=i(mu);yk=a(pz,"TrainingArguments"),pz.forEach(o),T2.forEach(o),Dv=d(t),F=s(t,"DIV",{class:!0});var j=i(F);u(os.$$.fragment,j),wk=d(j),rs=s(j,"P",{});var E2=i(rs);Tk=a(E2,"TrainingArguments is the subset of the arguments we use in our example scripts "),hu=s(E2,"STRONG",{});var mz=i(hu);Ek=a(mz,`which relate to the training loop itself`),mz.forEach(o),$k=a(E2,"."),E2.forEach(o),xk=d(j),To=s(j,"P",{});var Lc=i(To);kk=a(Lc,"Using "),kl=s(Lc,"A",{href:!0});var hz=i(kl);Ak=a(hz,"HfArgumentParser"),hz.forEach(o),Pk=a(Lc,` we can turn this class into `),as=s(Lc,"A",{href:!0,rel:!0});var uz=i(as);Dk=a(uz,"argparse"),uz.forEach(o),Sk=a(Lc,` arguments that can be specified on the command line.`),Lc.forEach(o),qk=d(j),de=s(j,"DIV",{class:!0});var Zt=i(de);u(ns.$$.fragment,Zt),Ok=d(Zt),uu=s(Zt,"P",{});var fz=i(uu);Ck=a(fz,`Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.`),fz.forEach(o),Ik=d(Zt),Eo=s(Zt,"P",{});var Rc=i(Eo);Uk=a(Rc,"For the main process the log level defaults to "),fu=s(Rc,"CODE",{});var gz=i(fu);Nk=a(gz,"logging.INFO"),gz.forEach(o),zk=a(Rc," unless overridden by "),gu=s(Rc,"CODE",{});var _z=i(gu);Fk=a(_z,"log_level"),_z.forEach(o),Lk=a(Rc," argument."),Rc.forEach(o),Rk=d(Zt),$o=s(Zt,"P",{});var Wc=i($o);Wk=a(Wc,"For the replica processes the log level defaults to "),_u=s(Wc,"CODE",{});var vz=i(_u);Gk=a(vz,"logging.WARNING"),vz.forEach(o),Mk=a(Wc," unless overridden by "),vu=s(Wc,"CODE",{});var bz=i(vu);jk=a(bz,"log_level_replica"),bz.forEach(o),Hk=a(Wc,` argument.`),Wc.forEach(o),Bk=d(Zt),ss=s(Zt,"P",{});var $2=i(ss);Vk=a($2,"The choice between 
the main and replica process settings is made according to the return value of "),bu=s($2,"CODE",{});var yz=i(bu);Yk=a(yz,"should_log"),yz.forEach(o),Zk=a($2,"."),$2.forEach(o),Zt.forEach(o),Kk=d(j),kr=s(j,"DIV",{class:!0});var x2=i(kr);u(is.$$.fragment,x2),Jk=d(x2),yu=s(x2,"P",{});var wz=i(yu);Xk=a(wz,"Get number of steps used for a linear warmup."),wz.forEach(o),x2.forEach(o),Qk=d(j),kt=s(j,"DIV",{class:!0});var Gc=i(kt);u(ls.$$.fragment,Gc),e5=d(Gc),wu=s(Gc,"P",{});var Tz=i(wu);t5=a(Tz,`A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it\u2019s finished releasing the replicas.`),Tz.forEach(o),o5=d(Gc),xo=s(Gc,"P",{});var Mc=i(xo);r5=a(Mc,"One such use is for "),Tu=s(Mc,"CODE",{});var Ez=i(Tu);a5=a(Ez,"datasets"),Ez.forEach(o),n5=a(Mc,"\u2019s "),Eu=s(Mc,"CODE",{});var $z=i(Eu);s5=a($z,"map"),$z.forEach(o),i5=a(Mc,` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.`),Mc.forEach(o),Gc.forEach(o),l5=d(j),Ar=s(j,"DIV",{class:!0});var k2=i(Ar);u(ds.$$.fragment,k2),d5=d(k2),cs=s(k2,"P",{});var A2=i(cs);c5=a(A2,"Serializes this instance while replace "),$u=s(A2,"CODE",{});var xz=i($u);p5=a(xz,"Enum"),xz.forEach(o),m5=a(A2,` by their values (for JSON serialization support). It obfuscates the token values by removing their value.`),A2.forEach(o),k2.forEach(o),h5=d(j),Pr=s(j,"DIV",{class:!0});var P2=i(Pr);u(ps.$$.fragment,P2),u5=d(P2),xu=s(P2,"P",{});var kz=i(xu);f5=a(kz,"Serializes this instance to a JSON string."),kz.forEach(o),P2.forEach(o),g5=d(j),Dr=s(j,"DIV",{class:!0});var D2=i(Dr);u(ms.$$.fragment,D2),_5=d(D2),ku=s(D2,"P",{});var Az=i(ku);v5=a(Az,"Sanitized serialization to use with TensorBoard\u2019s hparams"),Az.forEach(o),D2.forEach(o),j.forEach(o),Sv=d(t),ko=s(t,"H2",{class:!0});var S2=i(ko);Sr=s(S2,"A",{id:!0,class:!0,href:!0});var Pz=i(Sr);Au=s(Pz,"SPAN",{});var Dz=i(Au);u(hs.$$.fragment,Dz),Dz.forEach(o),Pz.forEach(o),b5=d(S2),Pu=s(S2,"SPAN",{});var Sz=i(Pu);y5=a(Sz,"Seq2SeqTrainingArguments"),Sz.forEach(o),S2.forEach(o),qv=d(t),ot=s(t,"DIV",{class:!0});var jc=i(ot);u(us.$$.fragment,jc),w5=d(jc),fs=s(jc,"P",{});var q2=i(fs);T5=a(q2,"TrainingArguments is the subset of the arguments we use in our example scripts "),Du=s(q2,"STRONG",{});var qz=i(Du);E5=a(qz,`which relate to the training loop itself`),qz.forEach(o),$5=a(q2,"."),q2.forEach(o),x5=d(jc),Ao=s(jc,"P",{});var Hc=i(Ao);k5=a(Hc,"Using "),Al=s(Hc,"A",{href:!0});var Oz=i(Al);A5=a(Oz,"HfArgumentParser"),Oz.forEach(o),P5=a(Hc,` we can turn this class into `),gs=s(Hc,"A",{href:!0,rel:!0});var Cz=i(gs);D5=a(Cz,"argparse"),Cz.forEach(o),S5=a(Hc,` arguments that can be specified on the command line.`),Hc.forEach(o),jc.forEach(o),Ov=d(t),Po=s(t,"H2",{class:!0});var O2=i(Po);qr=s(O2,"A",{id:!0,class:!0,href:!0});var Iz=i(qr);Su=s(Iz,"SPAN",{});var Uz=i(Su);u(_s.$$.fragment,Uz),Uz.forEach(o),Iz.forEach(o),q5=d(O2),qu=s(O2,"SPAN",{});var Nz=i(qu);O5=a(Nz,"Checkpoints"),Nz.forEach(o),O2.forEach(o),Cv=d(t),ce=s(t,"P",{});var Kt=i(ce);C5=a(Kt,"By default, "),Pl=s(Kt,"A",{href:!0});var zz=i(Pl);I5=a(zz,"Trainer"),zz.forEach(o),U5=a(Kt," will save all checkpoints in the "),Ou=s(Kt,"CODE",{});var Fz=i(Ou);N5=a(Fz,"output_dir"),Fz.forEach(o),z5=a(Kt,` you set in the `),Dl=s(Kt,"A",{href:!0});var Lz=i(Dl);F5=a(Lz,"TrainingArguments"),Lz.forEach(o),L5=a(Kt," you are using. 
Those will go in subfolder named "),Cu=s(Kt,"CODE",{});var Rz=i(Cu);R5=a(Rz,"checkpoint-xxx"),Rz.forEach(o),W5=a(Kt,` with xxx being the step at which the training was at.`),Kt.forEach(o),Iv=d(t),Or=s(t,"P",{});var C2=i(Or);G5=a(C2,"Resuming training from a checkpoint can be done when calling "),Sl=s(C2,"A",{href:!0});var Wz=i(Sl);M5=a(Wz,"Trainer.train()"),Wz.forEach(o),j5=a(C2," with either:"),C2.forEach(o),Uv=d(t),Cr=s(t,"UL",{});var I2=i(Cr);ql=s(I2,"LI",{});var RC=i(ql);Iu=s(RC,"CODE",{});var Gz=i(Iu);H5=a(Gz,"resume_from_checkpoint=True"),Gz.forEach(o),B5=a(RC," which will resume training from the latest checkpoint"),RC.forEach(o),V5=d(I2),Ol=s(I2,"LI",{});var WC=i(Ol);Uu=s(WC,"CODE",{});var Mz=i(Uu);Y5=a(Mz,"resume_from_checkpoint=checkpoint_dir"),Mz.forEach(o),Z5=a(WC,` which will resume training from the specific checkpoint in the directory passed.`),WC.forEach(o),I2.forEach(o),Nv=d(t),Ue=s(t,"P",{});var Ea=i(Ue);K5=a(Ea,"In addition, you can easily save your checkpoints on the Model Hub when using "),Nu=s(Ea,"CODE",{});var jz=i(Nu);J5=a(jz,"push_to_hub=True"),jz.forEach(o),X5=a(Ea,`. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. You can adapt the `),zu=s(Ea,"CODE",{});var Hz=i(zu);Q5=a(Hz,"hub-strategy"),Hz.forEach(o),e6=a(Ea," value of your "),Cl=s(Ea,"A",{href:!0});var Bz=i(Cl);t6=a(Bz,"TrainingArguments"),Bz.forEach(o),o6=a(Ea," to either:"),Ea.forEach(o),zv=d(t),Ir=s(t,"UL",{});var U2=i(Ir);Ur=s(U2,"LI",{});var gv=i(Ur);Fu=s(gv,"CODE",{});var Vz=i(Fu);r6=a(Vz,'"checkpoint"'),Vz.forEach(o),a6=a(gv,`: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `),Lu=s(gv,"CODE",{});var Yz=i(Lu);n6=a(Yz,'trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")'),Yz.forEach(o),s6=a(gv,"."),gv.forEach(o),i6=d(U2),Il=s(U2,"LI",{});var GC=i(Il);Ru=s(GC,"CODE",{});var Zz=i(Ru);l6=a(Zz,'"all_checkpoints"'),Zz.forEach(o),d6=a(GC,`: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)`),GC.forEach(o),U2.forEach(o),Fv=d(t),Do=s(t,"H2",{class:!0});var N2=i(Do);Nr=s(N2,"A",{id:!0,class:!0,href:!0});var Kz=i(Nr);Wu=s(Kz,"SPAN",{});var Jz=i(Wu);u(vs.$$.fragment,Jz),Jz.forEach(o),Kz.forEach(o),c6=d(N2),Gu=s(N2,"SPAN",{});var Xz=i(Gu);p6=a(Xz,"Logging"),Xz.forEach(o),N2.forEach(o),Lv=d(t),Ne=s(t,"P",{});var $a=i(Ne);m6=a($a,"By default "),Ul=s($a,"A",{href:!0});var Qz=i(Ul);h6=a(Qz,"Trainer"),Qz.forEach(o),u6=a($a," will use "),Mu=s($a,"CODE",{});var eF=i(Mu);f6=a(eF,"logging.INFO"),eF.forEach(o),g6=a($a," for the main process and "),ju=s($a,"CODE",{});var tF=i(ju);_6=a(tF,"logging.WARNING"),tF.forEach(o),v6=a($a," for the replicas if any."),$a.forEach(o),Rv=d(t),At=s(t,"P",{});var Bc=i(At);b6=a(Bc,"These defaults can be overridden to use any of the 5 "),Hu=s(Bc,"CODE",{});var oF=i(Hu);y6=a(oF,"logging"),oF.forEach(o),w6=a(Bc," levels with "),Nl=s(Bc,"A",{href:!0});var rF=i(Nl);T6=a(rF,"TrainingArguments"),rF.forEach(o),E6=a(Bc,`\u2019s arguments:`),Bc.forEach(o),Wv=d(t),zr=s(t,"UL",{});var z2=i(zr);zl=s(z2,"LI",{});var MC=i(zl);Bu=s(MC,"CODE",{});var aF=i(Bu);$6=a(aF,"log_level"),aF.forEach(o),x6=a(MC," - for the main process"),MC.forEach(o),k6=d(z2),Fl=s(z2,"LI",{});var jC=i(Fl);Vu=s(jC,"CODE",{});var nF=i(Vu);A6=a(nF,"log_level_replica"),nF.forEach(o),P6=a(jC," - for the replicas"),jC.forEach(o),z2.forEach(o),Gv=d(t),ze=s(t,"P",{});var xa=i(ze);D6=a(xa,"Further, 
if "),Ll=s(xa,"A",{href:!0});var sF=i(Ll);S6=a(sF,"TrainingArguments"),sF.forEach(o),q6=a(xa,"\u2019s "),Yu=s(xa,"CODE",{});var iF=i(Yu);O6=a(iF,"log_on_each_node"),iF.forEach(o),C6=a(xa," is set to "),Zu=s(xa,"CODE",{});var lF=i(Zu);I6=a(lF,"False"),lF.forEach(o),U6=a(xa,` only the main node will use the log level settings for its main process, all other nodes will use the log level settings for replicas.`),xa.forEach(o),Mv=d(t),ee=s(t,"P",{});var Ve=i(ee);N6=a(Ve,"Note that "),Rl=s(Ve,"A",{href:!0});var dF=i(Rl);z6=a(dF,"Trainer"),dF.forEach(o),F6=a(Ve," is going to set "),Ku=s(Ve,"CODE",{});var cF=i(Ku);L6=a(cF,"transformers"),cF.forEach(o),R6=a(Ve,`\u2019s log level separately for each node in its `),Ju=s(Ve,"CODE",{});var pF=i(Ju);W6=a(pF,"Trainer.__init__()"),pF.forEach(o),G6=a(Ve,`. So you may want to set this sooner (see the next example) if you tap into other `),Xu=s(Ve,"CODE",{});var mF=i(Xu);M6=a(mF,"transformers"),mF.forEach(o),j6=a(Ve," functionality before creating the "),Wl=s(Ve,"A",{href:!0});var hF=i(Wl);H6=a(hF,"Trainer"),hF.forEach(o),B6=a(Ve," object."),Ve.forEach(o),jv=d(t),Gl=s(t,"P",{});var uF=i(Gl);V6=a(uF,"Here is an example of how this can be used in an application:"),uF.forEach(o),Hv=d(t),u(bs.$$.fragment,t),Bv=d(t),Ml=s(t,"P",{});var fF=i(Ml);Y6=a(fF,`And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:`),fF.forEach(o),Vv=d(t),u(ys.$$.fragment,t),Yv=d(t),jl=s(t,"P",{});var gF=i(jl);Z6=a(gF,`In the multi-node environment if you also don\u2019t want the logs to repeat for each node\u2019s main process, you will want to change the above to:`),gF.forEach(o),Zv=d(t),u(ws.$$.fragment,t),Kv=d(t),Hl=s(t,"P",{});var _F=i(Hl);K6=a(_F,`and then only the main process of the first node will log at the \u201Cwarning\u201D level, and all other processes on the main node and all processes on other nodes will log at the \u201Cerror\u201D level.`),_F.forEach(o),Jv=d(t),Bl=s(t,"P",{});var vF=i(Bl);J6=a(vF,"If you need your application to be as quiet as possible you could do:"),vF.forEach(o),Xv=d(t),u(Ts.$$.fragment,t),Qv=d(t),Fr=s(t,"P",{});var F2=i(Fr);X6=a(F2,"(add "),Qu=s(F2,"CODE",{});var bF=i(Qu);Q6=a(bF,"--log_on_each_node 0"),bF.forEach(o),eA=a(F2," if on multi-node environment)"),F2.forEach(o),e1=d(t),So=s(t,"H2",{class:!0});var L2=i(So);Lr=s(L2,"A",{id:!0,class:!0,href:!0});var yF=i(Lr);ef=s(yF,"SPAN",{});var wF=i(ef);u(Es.$$.fragment,wF),wF.forEach(o),yF.forEach(o),tA=d(L2),tf=s(L2,"SPAN",{});var TF=i(tf);oA=a(TF,"Randomness"),TF.forEach(o),L2.forEach(o),t1=d(t),pe=s(t,"P",{});var Jt=i(pe);rA=a(Jt,"When resuming from a checkpoint generated by "),Vl=s(Jt,"A",{href:!0});var EF=i(Vl);aA=a(EF,"Trainer"),EF.forEach(o),nA=a(Jt,` all efforts are made to restore the `),of=s(Jt,"EM",{});var $F=i(of);sA=a($F,"python"),$F.forEach(o),iA=a(Jt,", "),rf=s(Jt,"EM",{});var xF=i(rf);lA=a(xF,"numpy"),xF.forEach(o),dA=a(Jt," and "),af=s(Jt,"EM",{});var kF=i(af);cA=a(kF,"pytorch"),kF.forEach(o),pA=a(Jt,` RNG states to the same states as they were at the moment of saving that checkpoint, which should make the \u201Cstop and resume\u201D style of training as close as possible to non-stop training.`),Jt.forEach(o),o1=d(t),Pt=s(t,"P",{});var Vc=i(Pt);mA=a(Vc,`However, due to various default non-deterministic pytorch settings this might not fully work. 
If you want full determinism please refer to `),$s=s(Vc,"A",{href:!0,rel:!0});var AF=i($s);hA=a(AF,"Controlling sources of randomness"),AF.forEach(o),uA=a(Vc,`. As explained in the document, that some of those settings that make things deterministic (.e.g., `),nf=s(Vc,"CODE",{});var PF=i(nf);fA=a(PF,"torch.backends.cudnn.deterministic"),PF.forEach(o),gA=a(Vc,`) may slow things down, therefore this can\u2019t be done by default, but you can enable those yourself if needed.`),Vc.forEach(o),r1=d(t),qo=s(t,"H2",{class:!0});var R2=i(qo);Rr=s(R2,"A",{id:!0,class:!0,href:!0});var DF=i(Rr);sf=s(DF,"SPAN",{});var SF=i(sf);u(xs.$$.fragment,SF),SF.forEach(o),DF.forEach(o),_A=d(R2),lf=s(R2,"SPAN",{});var qF=i(lf);vA=a(qF,"Specific GPUs Selection"),qF.forEach(o),R2.forEach(o),a1=d(t),Yl=s(t,"P",{});var OF=i(Yl);bA=a(OF,"Let\u2019s discuss how you can tell your program which GPUs are to be used and in what order."),OF.forEach(o),n1=d(t),Wr=s(t,"P",{});var W2=i(Wr);yA=a(W2,"When using "),ks=s(W2,"A",{href:!0,rel:!0});var CF=i(ks);df=s(CF,"CODE",{});var IF=i(df);wA=a(IF,"DistributedDataParallel"),IF.forEach(o),CF.forEach(o),TA=a(W2," to use only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs, but you wish to use the first 2 you can do:"),W2.forEach(o),s1=d(t),u(As.$$.fragment,t),i1=d(t),Dt=s(t,"P",{});var Yc=i(Dt);EA=a(Yc,"if you have either "),Ps=s(Yc,"A",{href:!0,rel:!0});var UF=i(Ps);cf=s(UF,"CODE",{});var NF=i(cf);$A=a(NF,"accelerate"),NF.forEach(o),UF.forEach(o),xA=a(Yc," or "),Ds=s(Yc,"A",{href:!0,rel:!0});var zF=i(Ds);pf=s(zF,"CODE",{});var FF=i(pf);kA=a(FF,"deepspeed"),FF.forEach(o),zF.forEach(o),AA=a(Yc," installed you can also accomplish the same by using one of:"),Yc.forEach(o),l1=d(t),u(Ss.$$.fragment,t),d1=d(t),u(qs.$$.fragment,t),c1=d(t),Gr=s(t,"P",{});var G2=i(Gr);PA=a(G2,"You don\u2019t need to use the Accelerate or "),Zl=s(G2,"A",{href:!0});var LF=i(Zl);DA=a(LF,"the Deepspeed integration"),LF.forEach(o),SA=a(G2," features to use these launchers."),G2.forEach(o),p1=d(t),Kl=s(t,"P",{});var RF=i(Kl);qA=a(RF,"Until now you were able to tell the program how many GPUs to use. Now let\u2019s discuss how to select specific GPUs and control their order."),RF.forEach(o),m1=d(t),Jl=s(t,"P",{});var WF=i(Jl);OA=a(WF,"The following environment variables help you control which GPUs to use and their order."),WF.forEach(o),h1=d(t),Xl=s(t,"P",{});var GF=i(Xl);mf=s(GF,"STRONG",{});var MF=i(mf);hf=s(MF,"CODE",{});var jF=i(hf);CA=a(jF,"CUDA_VISIBLE_DEVICES"),jF.forEach(o),MF.forEach(o),GF.forEach(o),u1=d(t),Mr=s(t,"P",{});var M2=i(Mr);IA=a(M2,"If you have multiple GPUs and you\u2019d like to use only 1 or a few of those GPUs, set the environment variable "),uf=s(M2,"CODE",{});var HF=i(uf);UA=a(HF,"CUDA_VISIBLE_DEVICES"),HF.forEach(o),NA=a(M2," to a list of the GPUs to be used."),M2.forEach(o),f1=d(t),Ql=s(t,"P",{});var BF=i(Ql);zA=a(BF,"For example, let\u2019s say you have 4 GPUs: 0, 1, 2 and 3. 
To run only on the physical GPUs 0 and 2, you can do:"),BF.forEach(o),g1=d(t),u(Os.$$.fragment,t),_1=d(t),St=s(t,"P",{});var Zc=i(St);FA=a(Zc,"So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to "),ff=s(Zc,"CODE",{});var VF=i(ff);LA=a(VF,"cuda:0"),VF.forEach(o),RA=a(Zc," and "),gf=s(Zc,"CODE",{});var YF=i(gf);WA=a(YF,"cuda:1"),YF.forEach(o),GA=a(Zc," correspondingly."),Zc.forEach(o),v1=d(t),ed=s(t,"P",{});var ZF=i(ed);MA=a(ZF,"You can even change their order:"),ZF.forEach(o),b1=d(t),u(Cs.$$.fragment,t),y1=d(t),qt=s(t,"P",{});var Kc=i(qt);jA=a(Kc,"Here your physical GPUs 0 and 2 are mapped to "),_f=s(Kc,"CODE",{});var KF=i(_f);HA=a(KF,"cuda:1"),KF.forEach(o),BA=a(Kc," and "),vf=s(Kc,"CODE",{});var JF=i(vf);VA=a(JF,"cuda:0"),JF.forEach(o),YA=a(Kc," correspondingly."),Kc.forEach(o),w1=d(t),Ot=s(t,"P",{});var Jc=i(Ot);ZA=a(Jc,"The above examples were all for "),bf=s(Jc,"CODE",{});var XF=i(bf);KA=a(XF,"DistributedDataParallel"),XF.forEach(o),JA=a(Jc," use pattern, but the same method works for "),Is=s(Jc,"A",{href:!0,rel:!0});var QF=i(Is);yf=s(QF,"CODE",{});var eL=i(yf);XA=a(eL,"DataParallel"),eL.forEach(o),QF.forEach(o),QA=a(Jc," as well:"),Jc.forEach(o),T1=d(t),u(Us.$$.fragment,t),E1=d(t),td=s(t,"P",{});var tL=i(td);e8=a(tL,"To emulate an environment without GPUs simply set this environment variable to an empty value like so:"),tL.forEach(o),$1=d(t),u(Ns.$$.fragment,t),x1=d(t),od=s(t,"P",{});var oL=i(od);t8=a(oL,"As with any environment variable you can, of course, export those instead of adding these to the command line, as in:"),oL.forEach(o),k1=d(t),u(zs.$$.fragment,t),A1=d(t),rd=s(t,"P",{});var rL=i(rd);o8=a(rL,"but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it\u2019s a common practice to set the environment variable just for a specific run on the same command line as it\u2019s shown in most examples of this section."),rL.forEach(o),P1=d(t),ad=s(t,"P",{});var aL=i(ad);wf=s(aL,"STRONG",{});var nL=i(wf);Tf=s(nL,"CODE",{});var sL=i(Tf);r8=a(sL,"CUDA_DEVICE_ORDER"),sL.forEach(o),nL.forEach(o),aL.forEach(o),D1=d(t),jr=s(t,"P",{});var j2=i(jr);a8=a(j2,"There is an additional environment variable "),Ef=s(j2,"CODE",{});var iL=i(Ef);n8=a(iL,"CUDA_DEVICE_ORDER"),iL.forEach(o),s8=a(j2," that controls how the physical devices are ordered. The two choices are:"),j2.forEach(o),S1=d(t),nd=s(t,"OL",{});var lL=i(nd);Fs=s(lL,"LI",{});var H2=i(Fs);i8=a(H2,"ordered by PCIe bus IDs (matches "),$f=s(H2,"CODE",{});var dL=i($f);l8=a(dL,"nvidia-smi"),dL.forEach(o),d8=a(H2,"\u2019s order) - this is the default."),H2.forEach(o),lL.forEach(o),q1=d(t),u(Ls.$$.fragment,t),O1=d(t),Rs=s(t,"OL",{start:!0});var cL=i(Rs);xf=s(cL,"LI",{});var pL=i(xf);c8=a(pL,"ordered by GPU compute capabilities"),pL.forEach(o),cL.forEach(o),C1=d(t),u(Ws.$$.fragment,t),I1=d(t),Ct=s(t,"P",{});var Xc=i(Ct);p8=a(Xc,"Most of the time you don\u2019t need to care about this environment variable, but it\u2019s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can\u2019t swap the cards (e.g., if the cooling of the devices gets impacted) then setting "),kf=s(Xc,"CODE",{});var mL=i(kf);m8=a(mL,"CUDA_DEVICE_ORDER=FASTEST_FIRST"),mL.forEach(o),h8=a(Xc," will always put the newer faster card first. 
It\u2019ll be somewhat confusing though since "),Af=s(Xc,"CODE",{});var hL=i(Af);u8=a(hL,"nvidia-smi"),hL.forEach(o),f8=a(Xc," will still report them in the PCIe order."),Xc.forEach(o),U1=d(t),sd=s(t,"P",{});var uL=i(sd);g8=a(uL,"The other solution to swapping the order is to use:"),uL.forEach(o),N1=d(t),u(Gs.$$.fragment,t),z1=d(t),id=s(t,"P",{});var fL=i(id);_8=a(fL,"In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has."),fL.forEach(o),F1=d(t),Hr=s(t,"P",{});var B2=i(Hr);v8=a(B2,"Also if you do set this environment variable it\u2019s the best to set it in your "),Pf=s(B2,"CODE",{});var gL=i(Pf);b8=a(gL,"~/.bashrc"),gL.forEach(o),y8=a(B2," file or some other startup config file and forget about it."),B2.forEach(o),L1=d(t),Oo=s(t,"H2",{class:!0});var V2=i(Oo);Br=s(V2,"A",{id:!0,class:!0,href:!0});var _L=i(Br);Df=s(_L,"SPAN",{});var vL=i(Df);u(Ms.$$.fragment,vL),vL.forEach(o),_L.forEach(o),w8=d(V2),Sf=s(V2,"SPAN",{});var bL=i(Sf);T8=a(bL,"Trainer Integrations"),bL.forEach(o),V2.forEach(o),R1=d(t),Vr=s(t,"P",{});var Y2=i(Vr);E8=a(Y2,"The "),ld=s(Y2,"A",{href:!0});var yL=i(ld);$8=a(yL,"Trainer"),yL.forEach(o),x8=a(Y2,` has been extended to support libraries that may dramatically improve your training time and fit much bigger models.`),Y2.forEach(o),W1=d(t),me=s(t,"P",{});var Xt=i(me);k8=a(Xt,"Currently it supports third party solutions, "),js=s(Xt,"A",{href:!0,rel:!0});var wL=i(js);A8=a(wL,"DeepSpeed"),wL.forEach(o),P8=a(Xt,", "),Hs=s(Xt,"A",{href:!0,rel:!0});var TL=i(Hs);D8=a(TL,"PyTorch FSDP"),TL.forEach(o),S8=a(Xt," and "),Bs=s(Xt,"A",{href:!0,rel:!0});var EL=i(Bs);q8=a(EL,"FairScale"),EL.forEach(o),O8=a(Xt,", which implement parts of the paper "),Vs=s(Xt,"A",{href:!0,rel:!0});var $L=i(Vs);C8=a($L,`ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He`),$L.forEach(o),I8=a(Xt,"."),Xt.forEach(o),G1=d(t),Yr=s(t,"P",{});var Z2=i(Yr);U8=a(Z2,"This provided support is new and experimental as of this writing. 
While the support for DeepSpeed and PyTorch FSDP is active and we welcome issues around it, we don\u2019t support the FairScale integration anymore since it has been integrated in PyTorch main (see the "),dd=s(Z2,"A",{href:!0});var xL=i(dd);N8=a(xL,"PyTorch FSDP integration"),xL.forEach(o),z8=a(Z2,")"),Z2.forEach(o),M1=d(t),cd=s(t,"A",{id:!0}),i(cd).forEach(o),j1=d(t),Co=s(t,"H3",{class:!0});var K2=i(Co);Zr=s(K2,"A",{id:!0,class:!0,href:!0});var kL=i(Zr);qf=s(kL,"SPAN",{});var AL=i(qf);u(Ys.$$.fragment,AL),AL.forEach(o),kL.forEach(o),F8=d(K2),Of=s(K2,"SPAN",{});var PL=i(Of);L8=a(PL,"CUDA Extension Installation Notes"),PL.forEach(o),K2.forEach(o),H1=d(t),pd=s(t,"P",{});var DL=i(pd);R8=a(DL,"As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used."),DL.forEach(o),B1=d(t),It=s(t,"P",{});var Qc=i(It);W8=a(Qc,"While all installation issues should be dealt with through the corresponding GitHub Issues of "),Zs=s(Qc,"A",{href:!0,rel:!0});var SL=i(Zs);G8=a(SL,"FairScale"),SL.forEach(o),M8=a(Qc," and "),Ks=s(Qc,"A",{href:!0,rel:!0});var qL=i(Ks);j8=a(qL,"Deepspeed"),qL.forEach(o),H8=a(Qc,`, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.`),Qc.forEach(o),V1=d(t),md=s(t,"P",{});var OL=i(md);B8=a(OL,"Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:"),OL.forEach(o),Y1=d(t),u(Js.$$.fragment,t),Z1=d(t),hd=s(t,"P",{});var CL=i(hd);V8=a(CL,"please, read the following notes first."),CL.forEach(o),K1=d(t),Ut=s(t,"P",{});var ep=i(Ut);Y8=a(ep,"In these notes we give examples for what to do when "),Cf=s(ep,"CODE",{});var IL=i(Cf);Z8=a(IL,"pytorch"),IL.forEach(o),K8=a(ep," has been built with CUDA "),If=s(ep,"CODE",{});var UL=i(If);J8=a(UL,"10.2"),UL.forEach(o),X8=a(ep,`. If your situation is different remember to adjust the version number to the one you are after.`),ep.forEach(o),J1=d(t),Io=s(t,"H4",{class:!0});var J2=i(Io);Kr=s(J2,"A",{id:!0,class:!0,href:!0});var NL=i(Kr);Uf=s(NL,"SPAN",{});var zL=i(Uf);u(Xs.$$.fragment,zL),zL.forEach(o),NL.forEach(o),Q8=d(J2),Nf=s(J2,"SPAN",{});var FL=i(Nf);eP=a(FL,"Possible problem #1"),FL.forEach(o),J2.forEach(o),X1=d(t),ud=s(t,"P",{});var LL=i(ud);tP=a(LL,`While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.`),LL.forEach(o),Q1=d(t),Fe=s(t,"P",{});var ka=i(Fe);oP=a(ka,"For example, if you installed "),zf=s(ka,"CODE",{});var RL=i(zf);rP=a(RL,"pytorch"),RL.forEach(o),aP=a(ka," with "),Ff=s(ka,"CODE",{});var WL=i(Ff);nP=a(WL,"cudatoolkit==10.2"),WL.forEach(o),sP=a(ka,` in the Python environment, you also need to have CUDA `),Lf=s(ka,"CODE",{});var GL=i(Lf);iP=a(GL,"10.2"),GL.forEach(o),lP=a(ka," installed system-wide."),ka.forEach(o),eb=d(t),Nt=s(t,"P",{});var tp=i(Nt);dP=a(tp,"The exact location may vary from system to system, but "),Rf=s(tp,"CODE",{});var ML=i(Rf);cP=a(ML,"/usr/local/cuda-10.2"),ML.forEach(o),pP=a(tp,` is the most common location on many Unix systems. When CUDA is correctly set up and added to the `),Wf=s(tp,"CODE",{});var jL=i(Wf);mP=a(jL,"PATH"),jL.forEach(o),hP=a(tp,` environment variable, one can find the installation location by doing:`),tp.forEach(o),tb=d(t),u(Qs.$$.fragment,t),ob=d(t),Jr=s(t,"P",{});var X2=i(Jr);uP=a(X2,`If you don\u2019t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. 
For example, if you\u2019re on Ubuntu you may want to search for: `),ei=s(X2,"A",{href:!0,rel:!0});var HL=i(ei);fP=a(HL,"ubuntu cuda 10.2 install"),HL.forEach(o),gP=a(X2,"."),X2.forEach(o),rb=d(t),Uo=s(t,"H4",{class:!0});var Q2=i(Uo);Xr=s(Q2,"A",{id:!0,class:!0,href:!0});var BL=i(Xr);Gf=s(BL,"SPAN",{});var VL=i(Gf);u(ti.$$.fragment,VL),VL.forEach(o),BL.forEach(o),_P=d(Q2),Mf=s(Q2,"SPAN",{});var YL=i(Mf);vP=a(YL,"Possible problem #2"),YL.forEach(o),Q2.forEach(o),ab=d(t),fd=s(t,"P",{});var ZL=i(fd);bP=a(ZL,`Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. For example you may have:`),ZL.forEach(o),nb=d(t),u(oi.$$.fragment,t),sb=d(t),zt=s(t,"P",{});var op=i(zt);yP=a(op,"Now, in this situation you need to make sure that your "),jf=s(op,"CODE",{});var KL=i(jf);wP=a(KL,"PATH"),KL.forEach(o),TP=a(op," and "),Hf=s(op,"CODE",{});var JL=i(Hf);EP=a(JL,"LD_LIBRARY_PATH"),JL.forEach(o),$P=a(op,` environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. If you encounter the problem, where the package build fails because it can\u2019t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.`),op.forEach(o),ib=d(t),gd=s(t,"P",{});var XL=i(gd);xP=a(XL,"First, you may look at their contents:"),XL.forEach(o),lb=d(t),u(ri.$$.fragment,t),db=d(t),_d=s(t,"P",{});var QL=i(_d);kP=a(QL,"so you get an idea of what is inside."),QL.forEach(o),cb=d(t),Qr=s(t,"P",{});var ew=i(Qr);AP=a(ew,"It\u2019s possible that "),Bf=s(ew,"CODE",{});var eR=i(Bf);PP=a(eR,"LD_LIBRARY_PATH"),eR.forEach(o),DP=a(ew," is empty."),ew.forEach(o),pb=d(t),rt=s(t,"P",{});var Qi=i(rt);Vf=s(Qi,"CODE",{});var tR=i(Vf);SP=a(tR,"PATH"),tR.forEach(o),qP=a(Qi," lists the locations of where executables can be found and "),Yf=s(Qi,"CODE",{});var oR=i(Yf);OP=a(oR,"LD_LIBRARY_PATH"),oR.forEach(o),CP=a(Qi,` is for where shared libraries are to looked for. In both cases, earlier entries have priority over the later ones. `),Zf=s(Qi,"CODE",{});var rR=i(Zf);IP=a(rR,":"),rR.forEach(o),UP=a(Qi,` is used to separate multiple entries.`),Qi.forEach(o),mb=d(t),vd=s(t,"P",{});var aR=i(vd);NP=a(aR,`Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:`),aR.forEach(o),hb=d(t),u(ai.$$.fragment,t),ub=d(t),bd=s(t,"P",{});var nR=i(bd);zP=a(nR,"Note that we aren\u2019t overwriting the existing values, but prepending instead."),nR.forEach(o),fb=d(t),Le=s(t,"P",{});var Aa=i(Le);FP=a(Aa,`Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. 
`),Kf=s(Aa,"CODE",{});var sR=i(Kf);LP=a(sR,"lib64"),sR.forEach(o),RP=a(Aa," sub-directory is where the various CUDA "),Jf=s(Aa,"CODE",{});var iR=i(Jf);WP=a(iR,".so"),iR.forEach(o),GP=a(Aa," objects, like "),Xf=s(Aa,"CODE",{});var lR=i(Xf);MP=a(lR,"libcudart.so"),lR.forEach(o),jP=a(Aa,` reside, it\u2019s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.`),Aa.forEach(o),gb=d(t),No=s(t,"H4",{class:!0});var tw=i(No);ea=s(tw,"A",{id:!0,class:!0,href:!0});var dR=i(ea);Qf=s(dR,"SPAN",{});var cR=i(Qf);u(ni.$$.fragment,cR),cR.forEach(o),dR.forEach(o),HP=d(tw),eg=s(tw,"SPAN",{});var pR=i(eg);BP=a(pR,"Possible problem #3"),pR.forEach(o),tw.forEach(o),_b=d(t),Ft=s(t,"P",{});var rp=i(Ft);VP=a(rp,"Some older CUDA versions may refuse to build with newer compilers. For example, you my have "),tg=s(rp,"CODE",{});var mR=i(tg);YP=a(mR,"gcc-9"),mR.forEach(o),ZP=a(rp,` but it wants `),og=s(rp,"CODE",{});var hR=i(og);KP=a(hR,"gcc-7"),hR.forEach(o),JP=a(rp,"."),rp.forEach(o),vb=d(t),yd=s(t,"P",{});var uR=i(yd);XP=a(uR,"There are various ways to go about it."),uR.forEach(o),bb=d(t),wd=s(t,"P",{});var fR=i(wd);QP=a(fR,"If you can install the latest CUDA toolkit it typically should support the newer compiler."),fR.forEach(o),yb=d(t),ta=s(t,"P",{});var ow=i(ta);eD=a(ow,`Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it\u2019s not the default one, so the build system can\u2019t see it. If you have `),rg=s(ow,"CODE",{});var gR=i(rg);tD=a(gR,"gcc-7"),gR.forEach(o),oD=a(ow,` installed but the build system complains it can\u2019t find it, the following might do the trick:`),ow.forEach(o),wb=d(t),u(si.$$.fragment,t),Tb=d(t),M=s(t,"P",{});var ue=i(M);rD=a(ue,"Here, we are making a symlink to "),ag=s(ue,"CODE",{});var _R=i(ag);aD=a(_R,"gcc-7"),_R.forEach(o),nD=a(ue," from "),ng=s(ue,"CODE",{});var vR=i(ng);sD=a(vR,"/usr/local/cuda-10.2/bin/gcc"),vR.forEach(o),iD=a(ue,` and since `),sg=s(ue,"CODE",{});var bR=i(sg);lD=a(bR,"/usr/local/cuda-10.2/bin/"),bR.forEach(o),dD=a(ue," should be in the "),ig=s(ue,"CODE",{});var yR=i(ig);cD=a(yR,"PATH"),yR.forEach(o),pD=a(ue,` environment variable (see the previous problem\u2019s solution), it should find `),lg=s(ue,"CODE",{});var wR=i(lg);mD=a(wR,"gcc-7"),wR.forEach(o),hD=a(ue," (and "),dg=s(ue,"CODE",{});var TR=i(dg);uD=a(TR,"g++7"),TR.forEach(o),fD=a(ue,") and then the build will succeed."),ue.forEach(o),Eb=d(t),Td=s(t,"P",{});var ER=i(Td);gD=a(ER,"As always make sure to edit the paths in the example to match your situation."),ER.forEach(o),$b=d(t),zo=s(t,"H3",{class:!0});var rw=i(zo);oa=s(rw,"A",{id:!0,class:!0,href:!0});var $R=i(oa);cg=s($R,"SPAN",{});var xR=i(cg);u(ii.$$.fragment,xR),xR.forEach(o),$R.forEach(o),_D=d(rw),pg=s(rw,"SPAN",{});var kR=i(pg);vD=a(kR,"FairScale"),kR.forEach(o),rw.forEach(o),xb=d(t),u(ra.$$.fragment,t),kb=d(t),Re=s(t,"P",{});var Pa=i(Re);bD=a(Pa,"By integrating "),li=s(Pa,"A",{href:!0,rel:!0});var AR=i(li);yD=a(AR,"FairScale"),AR.forEach(o),wD=a(Pa," the "),Ed=s(Pa,"A",{href:!0});var PR=i(Ed);TD=a(PR,"Trainer"),PR.forEach(o),ED=a(Pa,` provides support for the following features from `),di=s(Pa,"A",{href:!0,rel:!0});var DR=i(di);$D=a(DR,"the ZeRO paper"),DR.forEach(o),xD=a(Pa,":"),Pa.forEach(o),Ab=d(t),We=s(t,"OL",{});var Da=i(We);mg=s(Da,"LI",{});var SR=i(mg);kD=a(SR,"Optimizer State Sharding"),SR.forEach(o),AD=d(Da),hg=s(Da,"LI",{});var qR=i(hg);PD=a(qR,"Gradient 
Sharding"),qR.forEach(o),DD=d(Da),ug=s(Da,"LI",{});var OR=i(ug);SD=a(OR,"Model Parameters Sharding (new and very experimental)"),OR.forEach(o),qD=d(Da),fg=s(Da,"LI",{});var CR=i(fg);OD=a(CR,"CPU offload (new and very experimental)"),CR.forEach(o),Da.forEach(o),Pb=d(t),$d=s(t,"P",{});var IR=i($d);CD=a(IR,"You will need at least two GPUs to use this feature."),IR.forEach(o),Db=d(t),ci=s(t,"P",{});var HC=i(ci);gg=s(HC,"STRONG",{});var UR=i(gg);ID=a(UR,"Installation"),UR.forEach(o),UD=a(HC,":"),HC.forEach(o),Sb=d(t),xd=s(t,"P",{});var NR=i(xd);ND=a(NR,"Install the library via pypi:"),NR.forEach(o),qb=d(t),u(pi.$$.fragment,t),Ob=d(t),Lt=s(t,"P",{});var ap=i(Lt);zD=a(ap,"or via "),_g=s(ap,"CODE",{});var zR=i(_g);FD=a(zR,"transformers"),zR.forEach(o),LD=a(ap,"\u2019 "),vg=s(ap,"CODE",{});var FR=i(vg);RD=a(FR,"extras"),FR.forEach(o),WD=a(ap,":"),ap.forEach(o),Cb=d(t),u(mi.$$.fragment,t),Ib=d(t),Rt=s(t,"P",{});var np=i(Rt);GD=a(np,"(available starting from "),bg=s(np,"CODE",{});var LR=i(bg);MD=a(LR,"transformers==4.6.0"),LR.forEach(o),jD=a(np,") or find more details on "),hi=s(np,"A",{href:!0,rel:!0});var RR=i(hi);HD=a(RR,"the FairScale\u2019s GitHub page"),RR.forEach(o),BD=a(np,"."),np.forEach(o),Ub=d(t),aa=s(t,"P",{});var aw=i(aa);VD=a(aw,"If you\u2019re still struggling with the build, first make sure to read "),kd=s(aw,"A",{href:!0});var WR=i(kd);YD=a(WR,"CUDA Extension Installation Notes"),WR.forEach(o),ZD=a(aw,"."),aw.forEach(o),Nb=d(t),Ad=s(t,"P",{});var GR=i(Ad);KD=a(GR,"If it\u2019s still not resolved the build issue, here are a few more ideas."),GR.forEach(o),zb=d(t),ui=s(t,"P",{});var BC=i(ui);yg=s(BC,"CODE",{});var MR=i(yg);JD=a(MR,"fairscale"),MR.forEach(o),XD=a(BC,` seems to have an issue with the recently introduced by pip build isolation feature. 
If you have a problem with it, you may want to try one of:`),BC.forEach(o),Fb=d(t),u(fi.$$.fragment,t),Lb=d(t),Pd=s(t,"P",{});var jR=i(Pd);QD=a(jR,"or:"),jR.forEach(o),Rb=d(t),u(gi.$$.fragment,t),Wb=d(t),_i=s(t,"P",{});var VC=i(_i);wg=s(VC,"CODE",{});var HR=i(wg);eS=a(HR,"fairscale"),HR.forEach(o),tS=a(VC," also has issues with building against pytorch-nightly, so if you use it you may have to try one of:"),VC.forEach(o),Gb=d(t),u(vi.$$.fragment,t),Mb=d(t),Dd=s(t,"P",{});var BR=i(Dd);oS=a(BR,"or:"),BR.forEach(o),jb=d(t),u(bi.$$.fragment,t),Hb=d(t),Sd=s(t,"P",{});var VR=i(Sd);rS=a(VR,"Of course, adjust the urls to match the cuda version you use."),VR.forEach(o),Bb=d(t),na=s(t,"P",{});var nw=i(na);aS=a(nw,`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),yi=s(nw,"A",{href:!0,rel:!0});var YR=i(yi);nS=a(YR,"FairScale"),YR.forEach(o),sS=a(nw,"."),nw.forEach(o),Vb=d(t),wi=s(t,"P",{});var YC=i(wi);Tg=s(YC,"STRONG",{});var ZR=i(Tg);iS=a(ZR,"Usage"),ZR.forEach(o),lS=a(YC,":"),YC.forEach(o),Yb=d(t),Wt=s(t,"P",{});var sp=i(Wt);dS=a(sp,"To use the first version of Sharded data-parallelism, add "),Eg=s(sp,"CODE",{});var KR=i(Eg);cS=a(KR,"--sharded_ddp simple"),KR.forEach(o),pS=a(sp,` to the command line arguments, and make sure you have added the distributed launcher `),$g=s(sp,"CODE",{});var JR=i($g);mS=a(JR,"-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),JR.forEach(o),hS=a(sp," if you haven\u2019t been using it already."),sp.forEach(o),Zb=d(t),sa=s(t,"P",{});var sw=i(sa);uS=a(sw,"For example here is how you could use it for "),xg=s(sw,"CODE",{});var XR=i(xg);fS=a(XR,"run_translation.py"),XR.forEach(o),gS=a(sw," with 2 GPUs:"),sw.forEach(o),Kb=d(t),u(Ti.$$.fragment,t),Jb=d(t),qd=s(t,"P",{});var QR=i(qd);_S=a(QR,"Notes:"),QR.forEach(o),Xb=d(t),Ge=s(t,"UL",{});var Sa=i(Ge);kg=s(Sa,"LI",{});var eW=i(kg);vS=a(eW,"This feature requires distributed training (so multiple GPUs)."),eW.forEach(o),bS=d(Sa),Ag=s(Sa,"LI",{});var tW=i(Ag);yS=a(tW,"It is not implemented for TPUs."),tW.forEach(o),wS=d(Sa),Ei=s(Sa,"LI",{});var iw=i(Ei);TS=a(iw,"It works with "),Pg=s(iw,"CODE",{});var oW=i(Pg);ES=a(oW,"--fp16"),oW.forEach(o),$S=a(iw," too, to make things even faster."),iw.forEach(o),xS=d(Sa),$i=s(Sa,"LI",{});var lw=i($i);kS=a(lw,"One of the main benefits of enabling "),Dg=s(lw,"CODE",{});var rW=i(Dg);AS=a(rW,"--sharded_ddp simple"),rW.forEach(o),PS=a(lw,` is that it uses a lot less GPU memory, so you should be able to use significantly larger batch sizes using the same hardware (e.g. 
3x and even bigger) which should lead to significantly shorter training time.`),lw.forEach(o),Sa.forEach(o),Qb=d(t),xi=s(t,"OL",{start:!0});var aW=i(xi);at=s(aW,"LI",{});var qa=i(at);DS=a(qa,"To use the second version of Sharded data-parallelism, add "),Sg=s(qa,"CODE",{});var nW=i(Sg);SS=a(nW,"--sharded_ddp zero_dp_2"),nW.forEach(o),qS=a(qa," or "),qg=s(qa,"CODE",{});var sW=i(qg);OS=a(sW,"--sharded_ddp zero_dp_3"),sW.forEach(o),CS=a(qa," to the command line arguments, and make sure you have added the distributed launcher "),Og=s(qa,"CODE",{});var iW=i(Og);IS=a(iW,"-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),iW.forEach(o),US=a(qa," if you haven\u2019t been using it already."),qa.forEach(o),aW.forEach(o),ey=d(t),ia=s(t,"P",{});var dw=i(ia);NS=a(dw,"For example here is how you could use it for "),Cg=s(dw,"CODE",{});var lW=i(Cg);zS=a(lW,"run_translation.py"),lW.forEach(o),FS=a(dw," with 2 GPUs:"),dw.forEach(o),ty=d(t),u(ki.$$.fragment,t),oy=d(t),Fo=s(t,"P",{});var _v=i(Fo);Ig=s(_v,"CODE",{});var dW=i(Ig);LS=a(dW,"zero_dp_2"),dW.forEach(o),RS=a(_v," is an optimized version of the simple wrapper, while "),Ug=s(_v,"CODE",{});var cW=i(Ug);WS=a(cW,"zero_dp_3"),cW.forEach(o),GS=a(_v,` fully shards model weights, gradients and optimizer states.`),_v.forEach(o),ry=d(t),Gt=s(t,"P",{});var ip=i(Gt);MS=a(ip,"Both are compatible with adding "),Ng=s(ip,"CODE",{});var pW=i(Ng);jS=a(pW,"cpu_offload"),pW.forEach(o),HS=a(ip," to enable ZeRO-offload (activate it like this: "),zg=s(ip,"CODE",{});var mW=i(zg);BS=a(mW,'--sharded_ddp "zero_dp_2 cpu_offload"'),mW.forEach(o),VS=a(ip,")."),ip.forEach(o),ay=d(t),Od=s(t,"P",{});var hW=i(Od);YS=a(hW,"Notes:"),hW.forEach(o),ny=d(t),he=s(t,"UL",{});var Qt=i(he);Fg=s(Qt,"LI",{});var uW=i(Fg);ZS=a(uW,"This feature requires distributed training (so multiple GPUs)."),uW.forEach(o),KS=d(Qt),Lg=s(Qt,"LI",{});var fW=i(Lg);JS=a(fW,"It is not implemented for TPUs."),fW.forEach(o),XS=d(Qt),Ai=s(Qt,"LI",{});var cw=i(Ai);QS=a(cw,"It works with "),Rg=s(cw,"CODE",{});var gW=i(Rg);eq=a(gW,"--fp16"),gW.forEach(o),tq=a(cw," too, to make things even faster."),cw.forEach(o),oq=d(Qt),Lo=s(Qt,"LI",{});var lp=i(Lo);rq=a(lp,"The "),Wg=s(lp,"CODE",{});var _W=i(Wg);aq=a(_W,"cpu_offload"),_W.forEach(o),nq=a(lp," additional option requires "),Gg=s(lp,"CODE",{});var vW=i(Gg);sq=a(vW,"--fp16"),vW.forEach(o),iq=a(lp,"."),lp.forEach(o),lq=d(Qt),Mg=s(Qt,"LI",{});var bW=i(Mg);dq=a(bW,`This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.`),bW.forEach(o),Qt.forEach(o),sy=d(t),Cd=s(t,"P",{});var yW=i(Cd);cq=a(yW,"Known caveats:"),yW.forEach(o),iy=d(t),la=s(t,"UL",{});var pw=i(la);Ro=s(pw,"LI",{});var dp=i(Ro);pq=a(dp,"This feature is incompatible with "),jg=s(dp,"CODE",{});var wW=i(jg);mq=a(wW,"--predict_with_generate"),wW.forEach(o),hq=a(dp," in the "),Hg=s(dp,"EM",{});var TW=i(Hg);uq=a(TW,"run_translation.py"),TW.forEach(o),fq=a(dp," script."),dp.forEach(o),gq=d(pw),$e=s(pw,"LI",{});var eo=i($e);_q=a(eo,"Using "),Bg=s(eo,"CODE",{});var EW=i(Bg);vq=a(EW,"--sharded_ddp zero_dp_3"),EW.forEach(o),bq=a(eo,` requires wrapping each layer of the model in the special container `),Vg=s(eo,"CODE",{});var $W=i(Vg);yq=a($W,"FullyShardedDataParallelism"),$W.forEach(o),wq=a(eo," of fairscale. 
It should be used with the option "),Yg=s(eo,"CODE",{});var xW=i(Yg);Tq=a(xW,"auto_wrap"),xW.forEach(o),Eq=a(eo,` if you are not doing this yourself: `),Zg=s(eo,"CODE",{});var kW=i(Zg);$q=a(kW,'--sharded_ddp "zero_dp_3 auto_wrap"'),kW.forEach(o),xq=a(eo,"."),eo.forEach(o),pw.forEach(o),ly=d(t),Wo=s(t,"H3",{class:!0});var mw=i(Wo);da=s(mw,"A",{id:!0,class:!0,href:!0});var AW=i(da);Kg=s(AW,"SPAN",{});var PW=i(Kg);u(Pi.$$.fragment,PW),PW.forEach(o),AW.forEach(o),kq=d(mw),Jg=s(mw,"SPAN",{});var DW=i(Jg);Aq=a(DW,"PyTorch Fully Sharded Data parallel"),DW.forEach(o),mw.forEach(o),dy=d(t),ca=s(t,"P",{});var hw=i(ca);Pq=a(hw,`To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the `),Di=s(hw,"A",{href:!0,rel:!0});var SW=i(Di);Dq=a(SW,"Fully Sharded Data Parallel blog"),SW.forEach(o),Sq=a(hw,`. We have integrated the latest PyTorch\u2019s Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config.`),hw.forEach(o),cy=d(t),Si=s(t,"P",{});var ZC=i(Si);Xg=s(ZC,"STRONG",{});var qW=i(Xg);qq=a(qW,"Required PyTorch version for FSDP support"),qW.forEach(o),Oq=a(ZC,`: PyTorch Nightly (or 1.12.0 if you read this after it has been released) as the model saving with FSDP activated is only available with recent fixes.`),ZC.forEach(o),py=d(t),qi=s(t,"P",{});var KC=i(qi);Qg=s(KC,"STRONG",{});var OW=i(Qg);Cq=a(OW,"Usage"),OW.forEach(o),Iq=a(KC,":"),KC.forEach(o),my=d(t),te=s(t,"UL",{});var Ye=i(te);e_=s(Ye,"LI",{});var CW=i(e_);Oi=s(CW,"P",{});var uw=i(Oi);Uq=a(uw,`Make sure you have added the distributed launcher `),t_=s(uw,"CODE",{});var IW=i(t_);Nq=a(IW,"-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),IW.forEach(o),zq=a(uw," if you haven\u2019t been using it already."),uw.forEach(o),CW.forEach(o),Fq=d(Ye),Ci=s(Ye,"LI",{});var fw=i(Ci);Id=s(fw,"P",{});var JC=i(Id);o_=s(JC,"STRONG",{});var UW=i(o_);Lq=a(UW,"Sharding Strategy"),UW.forEach(o),Rq=a(JC,":"),JC.forEach(o),Wq=d(fw),Go=s(fw,"UL",{});var cp=i(Go);Ii=s(cp,"LI",{});var gw=i(Ii);Gq=a(gw,`FULL_SHARD : Shards optimizer states + gradients + model parameters across data parallel workers/GPUs. For this, add `),r_=s(gw,"CODE",{});var NW=i(r_);Mq=a(NW,"--fsdp full_shard"),NW.forEach(o),jq=a(gw," to the command line arguments."),gw.forEach(o),Hq=d(cp),Ui=s(cp,"LI",{});var _w=i(Ui);Bq=a(_w,`SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs. For this, add `),a_=s(_w,"CODE",{});var zW=i(a_);Vq=a(zW,"--fsdp shard_grad_op"),zW.forEach(o),Yq=a(_w," to the command line arguments."),_w.forEach(o),Zq=d(cp),Ni=s(cp,"LI",{});var vw=i(Ni);Kq=a(vw,"NO_SHARD : No sharding. 
For this, add "),n_=s(vw,"CODE",{});var FW=i(n_);Jq=a(FW,"--fsdp no_shard"),FW.forEach(o),Xq=a(vw," to the command line arguments."),vw.forEach(o),cp.forEach(o),fw.forEach(o),Qq=d(Ye),s_=s(Ye,"LI",{});var LW=i(s_);Mo=s(LW,"P",{});var pp=i(Mo);e7=a(pp,`To offload the parameters and gradients to the CPU, add `),i_=s(pp,"CODE",{});var RW=i(i_);t7=a(RW,'--fsdp "full_shard offload"'),RW.forEach(o),o7=a(pp," or "),l_=s(pp,"CODE",{});var WW=i(l_);r7=a(WW,'--fsdp "shard_grad_op offload"'),WW.forEach(o),a7=a(pp," to the command line arguments."),pp.forEach(o),LW.forEach(o),n7=d(Ye),d_=s(Ye,"LI",{});var GW=i(d_);nt=s(GW,"P",{});var Oa=i(nt);s7=a(Oa,"To automatically recursively wrap layers with FSDP using "),c_=s(Oa,"CODE",{});var MW=i(c_);i7=a(MW,"default_auto_wrap_policy"),MW.forEach(o),l7=a(Oa,`, add `),p_=s(Oa,"CODE",{});var jW=i(p_);d7=a(jW,'--fsdp "full_shard auto_wrap"'),jW.forEach(o),c7=a(Oa," or "),m_=s(Oa,"CODE",{});var HW=i(m_);p7=a(HW,'--fsdp "shard_grad_op auto_wrap"'),HW.forEach(o),m7=a(Oa," to the command line arguments."),Oa.forEach(o),GW.forEach(o),h7=d(Ye),h_=s(Ye,"LI",{});var BW=i(h_);jo=s(BW,"P",{});var mp=i(jo);u7=a(mp,`To enable both CPU offloading and auto wrapping, add `),u_=s(mp,"CODE",{});var VW=i(u_);f7=a(VW,'--fsdp "full_shard offload auto_wrap"'),VW.forEach(o),g7=a(mp," or "),f_=s(mp,"CODE",{});var YW=i(f_);_7=a(YW,'--fsdp "shard_grad_op offload auto_wrap"'),YW.forEach(o),v7=a(mp," to the command line arguments."),mp.forEach(o),BW.forEach(o),b7=d(Ye),zi=s(Ye,"LI",{});var bw=i(zi);g_=s(bw,"P",{});var ZW=i(g_);y7=a(ZW,"If auto wrapping is enabled, you can either use transformer based auto wrap policy or size based auto wrap policy."),ZW.forEach(o),w7=d(bw),Fi=s(bw,"UL",{});var yw=i(Fi);xe=s(yw,"LI",{});var to=i(xe);T7=a(to,"For transformer based auto wrap policy, please add "),__=s(to,"CODE",{});var KW=i(__);E7=a(KW,"--fsdp_transformer_layer_cls_to_wrap <value>"),KW.forEach(o),$7=a(to,` to command line arguments. This specifies the transformer layer class name (case-sensitive) to wrap ,e.g, `),v_=s(to,"CODE",{});var JW=i(v_);x7=a(JW,"BertLayer"),JW.forEach(o),k7=a(to,", "),b_=s(to,"CODE",{});var XW=i(b_);A7=a(XW,"GPTJBlock"),XW.forEach(o),P7=a(to,", "),y_=s(to,"CODE",{});var QW=i(y_);D7=a(QW,"T5Block"),QW.forEach(o),S7=a(to,` \u2026 This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers. Remaining layers including the shared embeddings are conviniently wrapped in same outermost FSDP unit. Therefore, use this for transformer based models.`),to.forEach(o),q7=d(yw),Li=s(yw,"LI",{});var ww=i(Li);O7=a(ww,"For size based auto wrap policy, please add "),w_=s(ww,"CODE",{});var eG=i(w_);C7=a(eG,"--fsdp_min_num_params <number>"),eG.forEach(o),I7=a(ww,` to command line arguments. It specifies FSDP\u2019s minimum number of parameters for auto wrapping.`),ww.forEach(o),yw.forEach(o),bw.forEach(o),Ye.forEach(o),hy=d(t),Ud=s(t,"P",{});var tG=i(Ud);T_=s(tG,"STRONG",{});var oG=i(T_);U7=a(oG,"Few caveats to be aware of"),oG.forEach(o),tG.forEach(o),uy=d(t),pa=s(t,"UL",{});var Tw=i(pa);Ri=s(Tw,"LI",{});var Ew=i(Ri);N7=a(Ew,`Mixed precision is currently not supported with FSDP as we wait for PyTorch to fix support for it. 
More details in this `),Wi=s(Ew,"A",{href:!0,rel:!0});var rG=i(Wi);z7=a(rG,"issues"),rG.forEach(o),F7=a(Ew,"."),Ew.forEach(o),L7=d(Tw),Ho=s(Tw,"LI",{});var hp=i(Ho);R7=a(hp,`FSDP currently doesn\u2019t support multiple parameter groups. More details mentioned in this `),Gi=s(hp,"A",{href:!0,rel:!0});var aG=i(Gi);W7=a(aG,"issue"),aG.forEach(o),G7=a(hp,` (`),E_=s(hp,"CODE",{});var nG=i(E_);M7=a(nG,"The original model parameters' .grads are not set, meaning that they cannot be optimized separately (which is why we cannot support multiple parameter groups)"),nG.forEach(o),j7=a(hp,")."),hp.forEach(o),Tw.forEach(o),fy=d(t),Bo=s(t,"H3",{class:!0});var $w=i(Bo);ma=s($w,"A",{id:!0,class:!0,href:!0});var sG=i(ma);$_=s(sG,"SPAN",{});var iG=i($_);u(Mi.$$.fragment,iG),iG.forEach(o),sG.forEach(o),H7=d($w),x_=s($w,"SPAN",{});var lG=i(x_);B7=a(lG,"Using Trainer for accelerated PyTorch Training on Mac"),lG.forEach(o),$w.forEach(o),gy=d(t),Me=s(t,"P",{});var Ca=i(Me);V7=a(Ca,`With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac. Apple\u2019s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `),k_=s(Ca,"CODE",{});var dG=i(k_);Y7=a(dG,'"mps"'),dG.forEach(o),Z7=a(Ca,` device. This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS. For more information please refer official documents `),ji=s(Ca,"A",{href:!0,rel:!0});var cG=i(ji);K7=a(cG,"Introducing Accelerated PyTorch Training on Mac"),cG.forEach(o),J7=a(Ca,` and `),Hi=s(Ca,"A",{href:!0,rel:!0});var pG=i(Hi);X7=a(pG,"MPS BACKEND"),pG.forEach(o),Q7=a(Ca,"."),Ca.forEach(o),_y=d(t),u(ha.$$.fragment,t),vy=d(t),Nd=s(t,"P",{});var mG=i(Nd);A_=s(mG,"STRONG",{});var hG=i(A_);eO=a(hG,"Benefits of Training and Inference using Apple Silicon Chips"),hG.forEach(o),mG.forEach(o),by=d(t),Mt=s(t,"OL",{});var up=i(Mt);P_=s(up,"LI",{});var uG=i(P_);tO=a(uG,"Enables users to train larger networks or batch sizes locally"),uG.forEach(o),oO=d(up),D_=s(up,"LI",{});var fG=i(D_);rO=a(fG,`Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. Therefore, improving end-to-end performance.`),fG.forEach(o),aO=d(up),S_=s(up,"LI",{});var gG=i(S_);nO=a(gG,"Reduces costs associated with cloud-based development or the need for additional local GPUs."),gG.forEach(o),up.forEach(o),yy=d(t),Vo=s(t,"P",{});var vv=i(Vo);q_=s(vv,"STRONG",{});var _G=i(q_);sO=a(_G,"Pre-requisites"),_G.forEach(o),iO=a(vv,`: To install torch with mps support, please follow this nice medium article `),Bi=s(vv,"A",{href:!0,rel:!0});var vG=i(Bi);lO=a(vG,"GPU-Acceleration Comes to PyTorch on M1 Macs"),vG.forEach(o),dO=a(vv,"."),vv.forEach(o),wy=d(t),Yo=s(t,"P",{});var bv=i(Yo);O_=s(bv,"STRONG",{});var bG=i(O_);cO=a(bG,"Usage"),bG.forEach(o),pO=a(bv,`: User has to just pass `),C_=s(bv,"CODE",{});var yG=i(C_);mO=a(yG,"--use_mps_device"),yG.forEach(o),hO=a(bv,` argument. 
For example, you can run the offical Glue text classififcation task (from the root folder) using Apple Silicon GPU with below command:`),bv.forEach(o),Ty=d(t),u(Vi.$$.fragment,t),Ey=d(t),zd=s(t,"P",{});var wG=i(zd);I_=s(wG,"STRONG",{});var TG=i(I_);uO=a(TG,"A few caveats to be aware of"),TG.forEach(o),wG.forEach(o),$y=d(t),ua=s(t,"OL",{});var xw=i(ua);Yi=s(xw,"LI",{});var kw=i(Yi);fO=a(kw,`Some PyTorch operations have not been implemented in mps and will throw an error. One way to get around that is to set the environment variable `),U_=s(kw,"CODE",{});var EG=i(U_);gO=a(EG,"PYTORCH_ENABLE_MPS_FALLBACK=1"),EG.forEach(o),_O=a(kw,`, which will fallback to CPU for these operations. It still throws a UserWarning however.`),kw.forEach(o),vO=d(xw),ke=s(xw,"LI",{});var oo=i(ke);bO=a(oo,"Distributed setups "),N_=s(oo,"CODE",{});var $G=i(N_);yO=a($G,"gloo"),$G.forEach(o),wO=a(oo," and "),z_=s(oo,"CODE",{});var xG=i(z_);TO=a(xG,"nccl"),xG.forEach(o),EO=a(oo," are not working with "),F_=s(oo,"CODE",{});var kG=i(F_);$O=a(kG,"mps"),kG.forEach(o),xO=a(oo,` device. This means that currently only single GPU of `),L_=s(oo,"CODE",{});var AG=i(L_);kO=a(AG,"mps"),AG.forEach(o),AO=a(oo," device type can be used."),oo.forEach(o),xw.forEach(o),xy=d(t),jt=s(t,"P",{});var fp=i(jt);PO=a(fp,"Finally, please, remember that, \u{1F917} "),R_=s(fp,"CODE",{});var PG=i(R_);DO=a(PG,"Trainer"),PG.forEach(o),SO=a(fp,` only integrates MPS backend, therefore if you have any problems or questions with regards to MPS backend usage, please, file an issue with `),Zi=s(fp,"A",{href:!0,rel:!0});var DG=i(Zi);qO=a(DG,"PyTorch GitHub"),DG.forEach(o),OO=a(fp,"."),fp.forEach(o),ky=d(t),Fd=s(t,"P",{});var SG=i(Fd);CO=a(SG,"Sections that were moved:"),SG.forEach(o),Ay=d(t),w=s(t,"P",{});var E=i(w);IO=a(E,"[ "),Ld=s(E,"A",{href:!0});var qG=i(Ld);UO=a(qG,"DeepSpeed"),qG.forEach(o),W_=s(E,"A",{id:!0}),i(W_).forEach(o),NO=a(E,` | `),Rd=s(E,"A",{href:!0});var OG=i(Rd);zO=a(OG,"Installation"),OG.forEach(o),G_=s(E,"A",{id:!0}),i(G_).forEach(o),FO=a(E,` | `),Wd=s(E,"A",{href:!0});var CG=i(Wd);LO=a(CG,"Deployment with multiple GPUs"),CG.forEach(o),M_=s(E,"A",{id:!0}),i(M_).forEach(o),RO=a(E,` | `),Gd=s(E,"A",{href:!0});var IG=i(Gd);WO=a(IG,"Deployment with one GPU"),IG.forEach(o),j_=s(E,"A",{id:!0}),i(j_).forEach(o),GO=a(E,` | `),Md=s(E,"A",{href:!0});var UG=i(Md);MO=a(UG,"Deployment in Notebooks"),UG.forEach(o),H_=s(E,"A",{id:!0}),i(H_).forEach(o),jO=a(E,` | `),jd=s(E,"A",{href:!0});var NG=i(jd);HO=a(NG,"Configuration"),NG.forEach(o),B_=s(E,"A",{id:!0}),i(B_).forEach(o),BO=a(E,` | `),Hd=s(E,"A",{href:!0});var zG=i(Hd);VO=a(zG,"Passing Configuration"),zG.forEach(o),V_=s(E,"A",{id:!0}),i(V_).forEach(o),YO=a(E,` | `),Bd=s(E,"A",{href:!0});var FG=i(Bd);ZO=a(FG,"Shared Configuration"),FG.forEach(o),Y_=s(E,"A",{id:!0}),i(Y_).forEach(o),KO=a(E,` | `),Vd=s(E,"A",{href:!0});var LG=i(Vd);JO=a(LG,"ZeRO"),LG.forEach(o),Z_=s(E,"A",{id:!0}),i(Z_).forEach(o),XO=a(E,` | `),Yd=s(E,"A",{href:!0});var RG=i(Yd);QO=a(RG,"ZeRO-2 Config"),RG.forEach(o),K_=s(E,"A",{id:!0}),i(K_).forEach(o),eC=a(E,` | `),Zd=s(E,"A",{href:!0});var WG=i(Zd);tC=a(WG,"ZeRO-3 Config"),WG.forEach(o),J_=s(E,"A",{id:!0}),i(J_).forEach(o),oC=a(E,` | `),Kd=s(E,"A",{href:!0});var GG=i(Kd);rC=a(GG,"NVMe Support"),GG.forEach(o),X_=s(E,"A",{id:!0}),i(X_).forEach(o),aC=a(E,` | `),Jd=s(E,"A",{href:!0});var MG=i(Jd);nC=a(MG,"ZeRO-2 vs ZeRO-3 Performance"),MG.forEach(o),Q_=s(E,"A",{id:!0}),i(Q_).forEach(o),sC=a(E,` | `),Xd=s(E,"A",{href:!0});var jG=i(Xd);iC=a(jG,"ZeRO-2 
Example"),jG.forEach(o),ev=s(E,"A",{id:!0}),i(ev).forEach(o),lC=a(E,` | `),Qd=s(E,"A",{href:!0});var HG=i(Qd);dC=a(HG,"ZeRO-3 Example"),HG.forEach(o),tv=s(E,"A",{id:!0}),i(tv).forEach(o),cC=a(E,` | `),ec=s(E,"A",{href:!0});var BG=i(ec);pC=a(BG,"Optimizer"),BG.forEach(o),ov=s(E,"A",{id:!0}),i(ov).forEach(o),mC=a(E,` | `),tc=s(E,"A",{href:!0});var VG=i(tc);hC=a(VG,"Scheduler"),VG.forEach(o),rv=s(E,"A",{id:!0}),i(rv).forEach(o),uC=a(E,` | `),oc=s(E,"A",{href:!0});var YG=i(oc);fC=a(YG,"fp32 Precision"),YG.forEach(o),av=s(E,"A",{id:!0}),i(av).forEach(o),gC=a(E,` | `),rc=s(E,"A",{href:!0});var ZG=i(rc);_C=a(ZG,"Automatic Mixed Precision"),ZG.forEach(o),nv=s(E,"A",{id:!0}),i(nv).forEach(o),vC=a(E,` | `),ac=s(E,"A",{href:!0});var KG=i(ac);bC=a(KG,"Batch Size"),KG.forEach(o),sv=s(E,"A",{id:!0}),i(sv).forEach(o),yC=a(E,` | `),nc=s(E,"A",{href:!0});var JG=i(nc);wC=a(JG,"Gradient Accumulation"),JG.forEach(o),iv=s(E,"A",{id:!0}),i(iv).forEach(o),TC=a(E,` | `),sc=s(E,"A",{href:!0});var XG=i(sc);EC=a(XG,"Gradient Clipping"),XG.forEach(o),lv=s(E,"A",{id:!0}),i(lv).forEach(o),$C=a(E,` | `),ic=s(E,"A",{href:!0});var QG=i(ic);xC=a(QG,"Getting The Model Weights Out"),QG.forEach(o),dv=s(E,"A",{id:!0}),i(dv).forEach(o),kC=a(E,` ]`),E.forEach(o),this.h()},h(){m(T,"name","hf:doc:metadata"),m(T,"content",JSON.stringify(fM)),m(k,"id","trainer"),m(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(k,"href","#trainer"),m($,"class","relative group"),m(ie,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(H,"href","https://github.com/huggingface/transformers/tree/main/examples"),m(H,"rel","nofollow"),m(ae,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(_e,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(ve,"href","https://github.com/NVIDIA/apex"),m(ve,"rel","nofollow"),m(be,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(fl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(gl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(_l,"href","callback"),m(or,"id","transformers.Trainer"),m(or,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(or,"href","#transformers.Trainer"),m(ao,"class","relative group"),m(vl,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(ar,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(nr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(pt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(sr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ir,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(mt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ht,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(lr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),m(ut,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(yl,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel"),m(dr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ft,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(cr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(gt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(_t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(mr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(hr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ur,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(vt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Tl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(En,"href","https://github.com/pytorch/pytorch/issues/16266"),m(En,"rel","nofollow"),m(El,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m($l,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(gr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(_r,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(yt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(wt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Tt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(br,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(yr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(xl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.log_metrics"),m(Ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Et,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m($t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(wr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Tr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(xt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Er,"id","transformers.Seq2SeqTrainer"),m(Er,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Er,"href","#transformers.Seq2SeqTrainer"),m(go,"class","relative 
group"),m(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(tt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(xr,"id","transformers.TrainingArguments"),m(xr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(xr,"href","#transformers.TrainingArguments"),m(wo,"class","relative group"),m(kl,"href","/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.HfArgumentParser"),m(as,"href","https://docs.python.org/3/library/argparse#module-argparse"),m(as,"rel","nofollow"),m(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(kr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(kt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ar,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Pr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Dr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Sr,"id","transformers.Seq2SeqTrainingArguments"),m(Sr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Sr,"href","#transformers.Seq2SeqTrainingArguments"),m(ko,"class","relative group"),m(Al,"href","/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.HfArgumentParser"),m(gs,"href","https://docs.python.org/3/library/argparse#module-argparse"),m(gs,"rel","nofollow"),m(ot,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(qr,"id","checkpoints"),m(qr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(qr,"href","#checkpoints"),m(Po,"class","relative group"),m(Pl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(Dl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(Sl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train"),m(Cl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(Nr,"id","logging"),m(Nr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Nr,"href","#logging"),m(Do,"class","relative group"),m(Ul,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(Nl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(Ll,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(Rl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(Wl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(Lr,"id","randomness"),m(Lr,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Lr,"href","#randomness"),m(So,"class","relative group"),m(Vl,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m($s,"href","https://pytorch.org/docs/stable/notes/randomness"),m($s,"rel","nofollow"),m(Rr,"id","specific-gpus-selection"),m(Rr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Rr,"href","#specific-gpus-selection"),m(qo,"class","relative group"),m(ks,"href","https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html"),m(ks,"rel","nofollow"),m(Ps,"href","https://github.com/huggingface/accelerate"),m(Ps,"rel","nofollow"),m(Ds,"href","https://github.com/microsoft/DeepSpeed"),m(Ds,"rel","nofollow"),m(Zl,"href","Deepspeed"),m(Is,"href","https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html"),m(Is,"rel","nofollow"),m(Rs,"start","2"),m(Br,"id","trainer-integrations"),m(Br,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Br,"href","#trainer-integrations"),m(Oo,"class","relative group"),m(ld,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(js,"href","https://github.com/microsoft/DeepSpeed"),m(js,"rel","nofollow"),m(Hs,"href","https://pytorch.org/docs/stable/fsdp.html"),m(Hs,"rel","nofollow"),m(Bs,"href","https://github.com/facebookresearch/fairscale/"),m(Bs,"rel","nofollow"),m(Vs,"href","https://arxiv.org/abs/1910.02054"),m(Vs,"rel","nofollow"),m(dd,"href","#pytorch-fully-sharded-data-parallel"),m(cd,"id","zero-install-notes"),m(Zr,"id","cuda-extension-installation-notes"),m(Zr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Zr,"href","#cuda-extension-installation-notes"),m(Co,"class","relative group"),m(Zs,"href","https://github.com/facebookresearch/fairscale/issues"),m(Zs,"rel","nofollow"),m(Ks,"href","https://github.com/microsoft/DeepSpeed/issues"),m(Ks,"rel","nofollow"),m(Kr,"id","possible-problem-1"),m(Kr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Kr,"href","#possible-problem-1"),m(Io,"class","relative group"),m(ei,"href","https://www.google.com/search?q=ubuntu+cuda+10.2+install"),m(ei,"rel","nofollow"),m(Xr,"id","possible-problem-2"),m(Xr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Xr,"href","#possible-problem-2"),m(Uo,"class","relative group"),m(ea,"id","possible-problem-3"),m(ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ea,"href","#possible-problem-3"),m(No,"class","relative group"),m(oa,"id","fairscale"),m(oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(oa,"href","#fairscale"),m(zo,"class","relative 
group"),m(li,"href","https://github.com/facebookresearch/fairscale/"),m(li,"rel","nofollow"),m(Ed,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(di,"href","https://arxiv.org/abs/1910.02054"),m(di,"rel","nofollow"),m(hi,"href","https://github.com/facebookresearch/fairscale/#installation"),m(hi,"rel","nofollow"),m(kd,"href","#zero-install-notes"),m(yi,"href","https://github.com/facebookresearch/fairscale/issues"),m(yi,"rel","nofollow"),m(xi,"start","3"),m(da,"id","pytorch-fully-sharded-data-parallel"),m(da,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(da,"href","#pytorch-fully-sharded-data-parallel"),m(Wo,"class","relative group"),m(Di,"href","https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/"),m(Di,"rel","nofollow"),m(Wi,"href","https://github.com/pytorch/pytorch/issues/75676"),m(Wi,"rel","nofollow"),m(Gi,"href","https://github.com/pytorch/pytorch/issues/76501"),m(Gi,"rel","nofollow"),m(ma,"id","using-trainer-for-accelerated-pytorch-training-on-mac"),m(ma,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ma,"href","#using-trainer-for-accelerated-pytorch-training-on-mac"),m(Bo,"class","relative group"),m(ji,"href","https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/"),m(ji,"rel","nofollow"),m(Hi,"href","https://pytorch.org/docs/stable/notes/mps.html"),m(Hi,"rel","nofollow"),m(Bi,"href","https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1"),m(Bi,"rel","nofollow"),m(Zi,"href","https://github.com/pytorch/pytorch/issues"),m(Zi,"rel","nofollow"),m(Ld,"href","./deepspeed#deepspeed-trainer-integration"),m(W_,"id","deepspeed"),m(Rd,"href","./deepspeed#deepspeed-installation"),m(G_,"id","installation"),m(Wd,"href","./deepspeed#deepspeed-multi-gpu"),m(M_,"id","deployment-with-multiple-gpus"),m(Gd,"href","./deepspeed#deepspeed-one-gpu"),m(j_,"id","deployment-with-one-gpu"),m(Md,"href","./deepspeed#deepspeed-notebook"),m(H_,"id","deployment-in-notebooks"),m(jd,"href","./deepspeed#deepspeed-config"),m(B_,"id","configuration"),m(Hd,"href","./deepspeed#deepspeed-config-passing"),m(V_,"id","passing-configuration"),m(Bd,"href","./deepspeed#deepspeed-config-shared"),m(Y_,"id","shared-configuration"),m(Vd,"href","./deepspeed#deepspeed-zero"),m(Z_,"id","zero"),m(Yd,"href","./deepspeed#deepspeed-zero2-config"),m(K_,"id","zero-2-config"),m(Zd,"href","./deepspeed#deepspeed-zero3-config"),m(J_,"id","zero-3-config"),m(Kd,"href","./deepspeed#deepspeed-nvme"),m(X_,"id","nvme-support"),m(Jd,"href","./deepspeed#deepspeed-zero2-zero3-performance"),m(Q_,"id","zero-2-vs-zero-3-performance"),m(Xd,"href","./deepspeed#deepspeed-zero2-example"),m(ev,"id","zero-2-example"),m(Qd,"href","./deepspeed#deepspeed-zero3-example"),m(tv,"id","zero-3-example"),m(ec,"href","./deepspeed#deepspeed-optimizer"),m(ov,"id","optimizer"),m(tc,"href","./deepspeed#deepspeed-scheduler"),m(rv,"id","scheduler"),m(oc,"href","./deepspeed#deepspeed-fp32"),m(av,"id","fp32-precision"),m(rc,"href","./deepspeed#deepspeed-amp"),m(nv,"id","automatic-mixed-precision"),m(ac,"href","./deepspeed#deepspeed-bs"),m(sv,"id","batch-size"),m(nc,"href","./deepspeed#deepspeed-grad-acc"),m(iv,"id","gradient-accumulation"),m(sc,"href","./deepspeed#deepspeed-grad-clip"),m(lv,"id","gradient-c
lipping"),m(ic,"href","./deepspeed#deepspeed-weight-extraction"),m(dv,"id","getting-the-model-weights-out")},m(t,c){e(document.head,T),p(t,D,c),p(t,$,c),e($,k),e(k,L),f(A,L,null),e($,S),e($,W),e(W,fe),p(t,oe,c),p(t,G,c),e(G,se),e(G,ie),e(ie,re),e(G,le),e(G,H),e(H,Ze),e(G,ge),p(t,z,c),p(t,I,c),e(I,st),e(I,ae),e(ae,it),e(I,lt),e(I,_e),e(_e,Ia),e(I,Ua),p(t,Ke,c),p(t,Pe,c),e(Pe,Na),e(Pe,ve),e(ve,za),e(Pe,Fa),p(t,K,c),p(t,B,c),e(B,el),e(B,be),e(be,Xo),e(B,tl),p(t,ro,c),p(t,C,c),e(C,V),e(V,Qo),e(Qo,ol),e(V,rl),e(C,al),e(C,nl),e(nl,_p),e(_p,Aw),e(nl,Pw),e(C,Dw),e(C,sl),e(sl,vp),e(vp,Sw),e(sl,qw),e(C,Ow),e(C,il),e(il,bp),e(bp,Cw),e(il,Iw),e(C,Uw),e(C,dt),e(dt,yp),e(yp,Nw),e(dt,zw),e(dt,wp),e(wp,Fw),e(dt,Lw),e(dt,Tp),e(Tp,Rw),e(dt,Ww),e(C,Gw),e(C,ll),e(ll,Ep),e(Ep,Mw),e(ll,jw),e(C,Hw),e(C,dl),e(dl,$p),e($p,Bw),e(dl,Vw),e(C,Yw),e(C,cl),e(cl,xp),e(xp,Zw),e(cl,Kw),e(C,Jw),e(C,pl),e(pl,kp),e(kp,Xw),e(pl,Qw),e(C,e0),e(C,ml),e(ml,Ap),e(Ap,t0),e(ml,o0),e(C,r0),e(C,hl),e(hl,Pp),e(Pp,a0),e(hl,n0),e(C,s0),e(C,ul),e(ul,Dp),e(Dp,i0),e(ul,l0),p(t,yv,c),f(er,t,c),p(t,wv,c),p(t,tr,c),e(tr,d0),e(tr,fl),e(fl,c0),e(tr,p0),p(t,Tv,c),f(La,t,c),p(t,Ev,c),p(t,ct,c),e(ct,m0),e(ct,gl),e(gl,h0),e(ct,u0),e(ct,_l),e(_l,f0),e(ct,g0),p(t,$v,c),p(t,ao,c),e(ao,or),e(or,Sp),f(Ra,Sp,null),e(ao,_0),e(ao,qp),e(qp,v0),p(t,xv,c),p(t,b,c),f(Wa,b,null),e(b,b0),e(b,Op),e(Op,y0),e(b,w0),e(b,Cp),e(Cp,T0),e(b,E0),e(b,ye),e(ye,rr),e(rr,Ip),e(Ip,$0),e(rr,x0),e(rr,vl),e(vl,k0),e(rr,A0),e(ye,P0),e(ye,J),e(J,Up),e(Up,D0),e(J,S0),e(J,Np),e(Np,q0),e(J,O0),e(J,zp),e(zp,C0),e(J,I0),e(J,Fp),e(Fp,U0),e(J,N0),e(J,Lp),e(Lp,z0),e(J,F0),e(J,Rp),e(Rp,L0),e(J,R0),e(ye,W0),e(ye,bl),e(bl,Wp),e(Wp,G0),e(bl,M0),e(ye,j0),e(ye,De),e(De,Gp),e(Gp,H0),e(De,B0),e(De,Mp),e(Mp,V0),e(De,Y0),e(De,jp),e(jp,Z0),e(De,K0),e(De,Hp),e(Hp,J0),e(De,X0),e(ye,Q0),e(ye,Se),e(Se,Bp),e(Bp,eT),e(Se,tT),e(Se,Vp),e(Vp,oT),e(Se,rT),e(Se,Yp),e(Yp,aT),e(Se,nT),e(Se,Zp),e(Zp,sT),e(Se,iT),e(b,lT),e(b,ar),f(Ga,ar,null),e(ar,dT),e(ar,Ma),e(Ma,cT),e(Ma,Kp),e(Kp,pT),e(Ma,mT),e(b,hT),e(b,nr),f(ja,nr,null),e(nr,uT),e(nr,Ha),e(Ha,fT),e(Ha,Jp),e(Jp,gT),e(Ha,_T),e(b,vT),e(b,pt),f(Ba,pt,null),e(pt,bT),e(pt,Xp),e(Xp,yT),e(pt,wT),e(pt,Qp),e(Qp,TT),e(b,ET),e(b,sr),f(Va,sr,null),e(sr,$T),e(sr,em),e(em,xT),e(b,kT),e(b,ir),f(Ya,ir,null),e(ir,AT),e(ir,Za),e(Za,PT),e(Za,tm),e(tm,DT),e(Za,ST),e(b,qT),e(b,mt),f(Ka,mt,null),e(mt,OT),e(mt,om),e(om,CT),e(mt,IT),e(mt,Ja),e(Ja,UT),e(Ja,rm),e(rm,NT),e(Ja,zT),e(b,FT),e(b,ht),f(Xa,ht,null),e(ht,LT),e(ht,am),e(am,RT),e(ht,WT),e(ht,Je),e(Je,GT),e(Je,nm),e(nm,MT),e(Je,jT),e(Je,sm),e(sm,HT),e(Je,BT),e(Je,im),e(im,VT),e(Je,YT),e(b,ZT),e(b,lr),f(Qa,lr,null),e(lr,KT),e(lr,lm),e(lm,JT),e(b,XT),e(b,qe),f(en,qe,null),e(qe,QT),e(qe,dm),e(dm,e4),e(qe,t4),e(qe,tn),e(tn,o4),e(tn,cm),e(cm,r4),e(tn,a4),e(qe,n4),e(qe,pm),e(pm,s4),e(b,i4),e(b,ut),f(on,ut,null),e(ut,l4),e(ut,no),e(no,d4),e(no,mm),e(mm,c4),e(no,p4),e(no,hm),e(hm,m4),e(no,h4),e(ut,u4),e(ut,um),e(um,f4),e(b,g4),e(b,dr),f(rn,dr,null),e(dr,_4),e(dr,an),e(an,v4),e(an,yl),e(yl,b4),e(an,y4),e(b,w4),e(b,ft),f(nn,ft,null),e(ft,T4),e(ft,sn),e(sn,E4),e(sn,fm),e(fm,$4),e(sn,x4),e(ft,k4),e(ft,gm),e(gm,A4),e(b,P4),e(b,cr),f(ln,cr,null),e(cr,D4),e(cr,_m),e(_m,S4),e(b,q4),e(b,gt),f(dn,gt,null),e(gt,O4),e(gt,cn),e(cn,C4),e(cn,vm),e(vm,I4),e(cn,U4),e(gt,N4),e(gt,bm),e(bm,z4),e(b,F4),e(b,Oe),f(pn,Oe,null),e(Oe,L4),e(Oe,mn),e(mn,R4),e(mn,ym),e(ym,W4),e(mn,G4),e(Oe,M4),e(Oe,so),e(so,j4),e(so,wm),e(wm,H4),e(so,B4),e(so,Tm),e(Tm,V4),e(so,Y4),e(Oe,Z4),e(Oe,Em),e(Em,K4),e(b,J4),e(b,_t),f(hn,_t,null),e(_t,X4),e(_t,we),e(we,Q4),e(we,$m),e($m,eE),e(w
e,tE),e(we,xm),e(xm,oE),e(we,rE),e(we,km),e(km,aE),e(we,nE),e(we,Am),e(Am,sE),e(we,iE),e(_t,lE),f(pr,_t,null),e(b,dE),e(b,mr),f(un,mr,null),e(mr,cE),e(mr,fn),e(fn,pE),e(fn,Pm),e(Pm,mE),e(fn,hE),e(b,uE),e(b,hr),f(gn,hr,null),e(hr,fE),e(hr,Dm),e(Dm,gE),e(b,_E),e(b,ur),f(_n,ur,null),e(ur,vE),e(ur,vn),e(vn,bE),e(vn,Sm),e(Sm,yE),e(vn,wE),e(b,TE),e(b,vt),f(bn,vt,null),e(vt,EE),e(vt,yn),e(yn,$E),e(yn,qm),e(qm,xE),e(yn,kE),e(vt,AE),e(vt,Om),e(Om,PE),e(b,DE),e(b,P),f(wn,P,null),e(P,SE),e(P,Cm),e(Cm,qE),e(P,OE),e(P,Im),e(Im,CE),e(P,IE),e(P,Um),e(Um,UE),e(P,NE),e(P,io),e(io,zE),e(io,Nm),e(Nm,FE),e(io,LE),e(io,zm),e(zm,RE),e(io,WE),e(P,GE),f(fr,P,null),e(P,ME),e(P,Fm),e(Fm,Lm),e(Lm,jE),e(P,HE),e(P,Xe),e(Xe,Te),e(Te,BE),e(Te,Rm),e(Rm,VE),e(Te,YE),e(Te,Wm),e(Wm,ZE),e(Te,KE),e(Te,Gm),e(Gm,JE),e(Te,XE),e(Te,Mm),e(Mm,QE),e(Te,e9),e(Xe,t9),e(Xe,lo),e(lo,o9),e(lo,jm),e(jm,r9),e(lo,a9),e(lo,Hm),e(Hm,n9),e(lo,s9),e(Xe,i9),e(Xe,wl),e(wl,Bm),e(Bm,l9),e(wl,d9),e(Xe,c9),e(Xe,bt),e(bt,Vm),e(Vm,p9),e(bt,m9),e(bt,Ym),e(Ym,h9),e(bt,u9),e(bt,Zm),e(Zm,f9),e(bt,g9),e(P,_9),e(P,Km),e(Km,v9),e(P,b9),e(P,Jm),e(Jm,y9),e(P,w9),e(P,Tn),e(Tn,T9),e(Tn,Xm),e(Xm,E9),e(Tn,$9),e(P,x9),e(P,Qe),e(Qe,k9),e(Qe,Qm),e(Qm,A9),e(Qe,P9),e(Qe,eh),e(eh,D9),e(Qe,S9),e(Qe,th),e(th,q9),e(Qe,O9),e(P,C9),e(P,ne),e(ne,I9),e(ne,Tl),e(Tl,U9),e(ne,N9),e(ne,oh),e(oh,z9),e(ne,F9),e(ne,rh),e(rh,L9),e(ne,R9),e(ne,ah),e(ah,W9),e(ne,G9),e(ne,nh),e(nh,M9),e(ne,j9),e(P,H9),e(P,U),e(U,B9),e(U,sh),e(sh,V9),e(U,Y9),e(U,ih),e(ih,Z9),e(U,K9),e(U,lh),e(lh,J9),e(U,X9),e(U,dh),e(dh,Q9),e(U,e$),e(U,En),e(En,t$),e(U,o$),e(U,ch),e(ch,r$),e(U,a$),e(U,ph),e(ph,n$),e(U,s$),e(U,mh),e(mh,i$),e(U,l$),e(U,hh),e(hh,d$),e(U,c$),e(U,uh),e(uh,p$),e(U,m$),e(P,h$),e(P,Ee),e(Ee,u$),e(Ee,El),e(El,f$),e(Ee,g$),e(Ee,fh),e(fh,_$),e(Ee,v$),e(Ee,$l),e($l,b$),e(Ee,y$),e(Ee,gh),e(gh,w$),e(Ee,T$),e(P,E$),e(P,_h),e(_h,$$),e(b,x$),e(b,gr),f($n,gr,null),e(gr,k$),e(gr,vh),e(vh,A$),e(b,P$),e(b,_r),f(xn,_r,null),e(_r,D$),e(_r,kn),e(kn,S$),e(kn,bh),e(bh,q$),e(kn,O$),e(b,C$),e(b,yt),f(An,yt,null),e(yt,I$),e(yt,Pn),e(Pn,U$),e(Pn,yh),e(yh,N$),e(Pn,z$),e(yt,F$),e(yt,Dn),e(Dn,L$),e(Dn,wh),e(wh,R$),e(Dn,W$),e(b,G$),e(b,X),f(Sn,X,null),e(X,M$),e(X,Th),e(Th,j$),e(X,H$),e(X,qn),e(qn,B$),e(qn,Eh),e(Eh,V$),e(qn,Y$),e(X,Z$),f(vr,X,null),e(X,K$),e(X,On),e(On,J$),e(On,$h),e($h,X$),e(On,Q$),e(X,e3),e(X,co),e(co,po),e(po,t3),e(po,xh),e(xh,o3),e(po,r3),e(po,kh),e(kh,a3),e(po,n3),e(co,s3),e(co,mo),e(mo,i3),e(mo,Ah),e(Ah,l3),e(mo,d3),e(mo,Ph),e(Ph,c3),e(mo,p3),e(co,m3),e(co,ho),e(ho,h3),e(ho,Dh),e(Dh,u3),e(ho,f3),e(ho,Sh),e(Sh,g3),e(ho,_3),e(b,v3),e(b,wt),f(Cn,wt,null),e(wt,b3),e(wt,uo),e(uo,y3),e(uo,qh),e(qh,w3),e(uo,T3),e(uo,Oh),e(Oh,E3),e(uo,$3),e(wt,x3),e(wt,Ch),e(Ch,k3),e(b,A3),e(b,Tt),f(In,Tt,null),e(Tt,P3),e(Tt,fo),e(fo,D3),e(fo,Ih),e(Ih,S3),e(fo,q3),e(fo,Uh),e(Uh,O3),e(fo,C3),e(Tt,I3),e(Tt,Nh),e(Nh,U3),e(b,N3),e(b,br),f(Un,br,null),e(br,z3),e(br,et),e(et,F3),e(et,zh),e(zh,L3),e(et,R3),e(et,Fh),e(Fh,W3),e(et,G3),e(et,Lh),e(Lh,M3),e(et,j3),e(b,H3),e(b,yr),f(Nn,yr,null),e(yr,B3),e(yr,zn),e(zn,V3),e(zn,Rh),e(Rh,Y3),e(zn,Z3),e(b,K3),e(b,Ce),f(Fn,Ce,null),e(Ce,J3),e(Ce,Ln),e(Ln,X3),e(Ln,Wh),e(Wh,Q3),e(Ln,ex),e(Ce,tx),e(Ce,Gh),e(Gh,ox),e(Ce,rx),e(Ce,Rn),e(Rn,ax),e(Rn,xl),e(xl,nx),e(Rn,sx),e(b,ix),e(b,Et),f(Wn,Et,null),e(Et,lx),e(Et,Gn),e(Gn,dx),e(Gn,Mh),e(Mh,cx),e(Gn,px),e(Et,mx),e(Et,jh),e(jh,hx),e(b,ux),e(b,$t),f(Mn,$t,null),e($t,fx),e($t,Hh),e(Hh,gx),e($t,_x),e($t,Bh),e(Bh,vx),e(b,bx),e(b,wr),f(jn,wr,null),e(wr,yx),e(wr,Hn),e(Hn,wx),e(Hn,Vh),e(Vh,Tx),e(Hn,Ex),e(b,$x),e(b,Tr),f(Bn,Tr,null),e(Tr,xx),e(Tr,Yh),e(Yh,kx),
e(b,Ax),e(b,xt),f(Vn,xt,null),e(xt,Px),e(xt,Zh),e(Zh,Dx),e(xt,Sx),e(xt,Kh),e(Kh,qx),p(t,kv,c),p(t,go,c),e(go,Er),e(Er,Jh),f(Yn,Jh,null),e(go,Ox),e(go,Xh),e(Xh,Cx),p(t,Av,c),p(t,tt,c),f(Zn,tt,null),e(tt,Ix),e(tt,Ie),f(Kn,Ie,null),e(Ie,Ux),e(Ie,Qh),e(Qh,Nx),e(Ie,zx),e(Ie,Jn),e(Jn,Fx),e(Jn,eu),e(eu,Lx),e(Jn,Rx),e(Ie,Wx),e(Ie,tu),e(tu,Gx),e(tt,Mx),e(tt,Q),f(Xn,Q,null),e(Q,jx),e(Q,ou),e(ou,Hx),e(Q,Bx),e(Q,Qn),e(Qn,Vx),e(Qn,ru),e(ru,Yx),e(Qn,Zx),e(Q,Kx),f($r,Q,null),e(Q,Jx),e(Q,es),e(es,Xx),e(es,au),e(au,Qx),e(es,ek),e(Q,tk),e(Q,_o),e(_o,vo),e(vo,ok),e(vo,nu),e(nu,rk),e(vo,ak),e(vo,su),e(su,nk),e(vo,sk),e(_o,ik),e(_o,bo),e(bo,lk),e(bo,iu),e(iu,dk),e(bo,ck),e(bo,lu),e(lu,pk),e(bo,mk),e(_o,hk),e(_o,yo),e(yo,uk),e(yo,du),e(du,fk),e(yo,gk),e(yo,cu),e(cu,_k),e(yo,vk),p(t,Pv,c),p(t,wo,c),e(wo,xr),e(xr,pu),f(ts,pu,null),e(wo,bk),e(wo,mu),e(mu,yk),p(t,Dv,c),p(t,F,c),f(os,F,null),e(F,wk),e(F,rs),e(rs,Tk),e(rs,hu),e(hu,Ek),e(rs,$k),e(F,xk),e(F,To),e(To,kk),e(To,kl),e(kl,Ak),e(To,Pk),e(To,as),e(as,Dk),e(To,Sk),e(F,qk),e(F,de),f(ns,de,null),e(de,Ok),e(de,uu),e(uu,Ck),e(de,Ik),e(de,Eo),e(Eo,Uk),e(Eo,fu),e(fu,Nk),e(Eo,zk),e(Eo,gu),e(gu,Fk),e(Eo,Lk),e(de,Rk),e(de,$o),e($o,Wk),e($o,_u),e(_u,Gk),e($o,Mk),e($o,vu),e(vu,jk),e($o,Hk),e(de,Bk),e(de,ss),e(ss,Vk),e(ss,bu),e(bu,Yk),e(ss,Zk),e(F,Kk),e(F,kr),f(is,kr,null),e(kr,Jk),e(kr,yu),e(yu,Xk),e(F,Qk),e(F,kt),f(ls,kt,null),e(kt,e5),e(kt,wu),e(wu,t5),e(kt,o5),e(kt,xo),e(xo,r5),e(xo,Tu),e(Tu,a5),e(xo,n5),e(xo,Eu),e(Eu,s5),e(xo,i5),e(F,l5),e(F,Ar),f(ds,Ar,null),e(Ar,d5),e(Ar,cs),e(cs,c5),e(cs,$u),e($u,p5),e(cs,m5),e(F,h5),e(F,Pr),f(ps,Pr,null),e(Pr,u5),e(Pr,xu),e(xu,f5),e(F,g5),e(F,Dr),f(ms,Dr,null),e(Dr,_5),e(Dr,ku),e(ku,v5),p(t,Sv,c),p(t,ko,c),e(ko,Sr),e(Sr,Au),f(hs,Au,null),e(ko,b5),e(ko,Pu),e(Pu,y5),p(t,qv,c),p(t,ot,c),f(us,ot,null),e(ot,w5),e(ot,fs),e(fs,T5),e(fs,Du),e(Du,E5),e(fs,$5),e(ot,x5),e(ot,Ao),e(Ao,k5),e(Ao,Al),e(Al,A5),e(Ao,P5),e(Ao,gs),e(gs,D5),e(Ao,S5),p(t,Ov,c),p(t,Po,c),e(Po,qr),e(qr,Su),f(_s,Su,null),e(Po,q5),e(Po,qu),e(qu,O5),p(t,Cv,c),p(t,ce,c),e(ce,C5),e(ce,Pl),e(Pl,I5),e(ce,U5),e(ce,Ou),e(Ou,N5),e(ce,z5),e(ce,Dl),e(Dl,F5),e(ce,L5),e(ce,Cu),e(Cu,R5),e(ce,W5),p(t,Iv,c),p(t,Or,c),e(Or,G5),e(Or,Sl),e(Sl,M5),e(Or,j5),p(t,Uv,c),p(t,Cr,c),e(Cr,ql),e(ql,Iu),e(Iu,H5),e(ql,B5),e(Cr,V5),e(Cr,Ol),e(Ol,Uu),e(Uu,Y5),e(Ol,Z5),p(t,Nv,c),p(t,Ue,c),e(Ue,K5),e(Ue,Nu),e(Nu,J5),e(Ue,X5),e(Ue,zu),e(zu,Q5),e(Ue,e6),e(Ue,Cl),e(Cl,t6),e(Ue,o6),p(t,zv,c),p(t,Ir,c),e(Ir,Ur),e(Ur,Fu),e(Fu,r6),e(Ur,a6),e(Ur,Lu),e(Lu,n6),e(Ur,s6),e(Ir,i6),e(Ir,Il),e(Il,Ru),e(Ru,l6),e(Il,d6),p(t,Fv,c),p(t,Do,c),e(Do,Nr),e(Nr,Wu),f(vs,Wu,null),e(Do,c6),e(Do,Gu),e(Gu,p6),p(t,Lv,c),p(t,Ne,c),e(Ne,m6),e(Ne,Ul),e(Ul,h6),e(Ne,u6),e(Ne,Mu),e(Mu,f6),e(Ne,g6),e(Ne,ju),e(ju,_6),e(Ne,v6),p(t,Rv,c),p(t,At,c),e(At,b6),e(At,Hu),e(Hu,y6),e(At,w6),e(At,Nl),e(Nl,T6),e(At,E6),p(t,Wv,c),p(t,zr,c),e(zr,zl),e(zl,Bu),e(Bu,$6),e(zl,x6),e(zr,k6),e(zr,Fl),e(Fl,Vu),e(Vu,A6),e(Fl,P6),p(t,Gv,c),p(t,ze,c),e(ze,D6),e(ze,Ll),e(Ll,S6),e(ze,q6),e(ze,Yu),e(Yu,O6),e(ze,C6),e(ze,Zu),e(Zu,I6),e(ze,U6),p(t,Mv,c),p(t,ee,c),e(ee,N6),e(ee,Rl),e(Rl,z6),e(ee,F6),e(ee,Ku),e(Ku,L6),e(ee,R6),e(ee,Ju),e(Ju,W6),e(ee,G6),e(ee,Xu),e(Xu,M6),e(ee,j6),e(ee,Wl),e(Wl,H6),e(ee,B6),p(t,jv,c),p(t,Gl,c),e(Gl,V6),p(t,Hv,c),f(bs,t,c),p(t,Bv,c),p(t,Ml,c),e(Ml,Y6),p(t,Vv,c),f(ys,t,c),p(t,Yv,c),p(t,jl,c),e(jl,Z6),p(t,Zv,c),f(ws,t,c),p(t,Kv,c),p(t,Hl,c),e(Hl,K6),p(t,Jv,c),p(t,Bl,c),e(Bl,J6),p(t,Xv,c),f(Ts,t,c),p(t,Qv,c),p(t,Fr,c),e(Fr,X6),e(Fr,Qu),e(Qu,Q6),e(Fr,eA),p(t,e1,c),p(t,So,c),e(So,Lr),e(Lr,ef),f(Es,ef,null),e(So,tA),e(So,tf),e(tf,oA),p(t,t1,c),p(t,
pe,c),e(pe,rA),e(pe,Vl),e(Vl,aA),e(pe,nA),e(pe,of),e(of,sA),e(pe,iA),e(pe,rf),e(rf,lA),e(pe,dA),e(pe,af),e(af,cA),e(pe,pA),p(t,o1,c),p(t,Pt,c),e(Pt,mA),e(Pt,$s),e($s,hA),e(Pt,uA),e(Pt,nf),e(nf,fA),e(Pt,gA),p(t,r1,c),p(t,qo,c),e(qo,Rr),e(Rr,sf),f(xs,sf,null),e(qo,_A),e(qo,lf),e(lf,vA),p(t,a1,c),p(t,Yl,c),e(Yl,bA),p(t,n1,c),p(t,Wr,c),e(Wr,yA),e(Wr,ks),e(ks,df),e(df,wA),e(Wr,TA),p(t,s1,c),f(As,t,c),p(t,i1,c),p(t,Dt,c),e(Dt,EA),e(Dt,Ps),e(Ps,cf),e(cf,$A),e(Dt,xA),e(Dt,Ds),e(Ds,pf),e(pf,kA),e(Dt,AA),p(t,l1,c),f(Ss,t,c),p(t,d1,c),f(qs,t,c),p(t,c1,c),p(t,Gr,c),e(Gr,PA),e(Gr,Zl),e(Zl,DA),e(Gr,SA),p(t,p1,c),p(t,Kl,c),e(Kl,qA),p(t,m1,c),p(t,Jl,c),e(Jl,OA),p(t,h1,c),p(t,Xl,c),e(Xl,mf),e(mf,hf),e(hf,CA),p(t,u1,c),p(t,Mr,c),e(Mr,IA),e(Mr,uf),e(uf,UA),e(Mr,NA),p(t,f1,c),p(t,Ql,c),e(Ql,zA),p(t,g1,c),f(Os,t,c),p(t,_1,c),p(t,St,c),e(St,FA),e(St,ff),e(ff,LA),e(St,RA),e(St,gf),e(gf,WA),e(St,GA),p(t,v1,c),p(t,ed,c),e(ed,MA),p(t,b1,c),f(Cs,t,c),p(t,y1,c),p(t,qt,c),e(qt,jA),e(qt,_f),e(_f,HA),e(qt,BA),e(qt,vf),e(vf,VA),e(qt,YA),p(t,w1,c),p(t,Ot,c),e(Ot,ZA),e(Ot,bf),e(bf,KA),e(Ot,JA),e(Ot,Is),e(Is,yf),e(yf,XA),e(Ot,QA),p(t,T1,c),f(Us,t,c),p(t,E1,c),p(t,td,c),e(td,e8),p(t,$1,c),f(Ns,t,c),p(t,x1,c),p(t,od,c),e(od,t8),p(t,k1,c),f(zs,t,c),p(t,A1,c),p(t,rd,c),e(rd,o8),p(t,P1,c),p(t,ad,c),e(ad,wf),e(wf,Tf),e(Tf,r8),p(t,D1,c),p(t,jr,c),e(jr,a8),e(jr,Ef),e(Ef,n8),e(jr,s8),p(t,S1,c),p(t,nd,c),e(nd,Fs),e(Fs,i8),e(Fs,$f),e($f,l8),e(Fs,d8),p(t,q1,c),f(Ls,t,c),p(t,O1,c),p(t,Rs,c),e(Rs,xf),e(xf,c8),p(t,C1,c),f(Ws,t,c),p(t,I1,c),p(t,Ct,c),e(Ct,p8),e(Ct,kf),e(kf,m8),e(Ct,h8),e(Ct,Af),e(Af,u8),e(Ct,f8),p(t,U1,c),p(t,sd,c),e(sd,g8),p(t,N1,c),f(Gs,t,c),p(t,z1,c),p(t,id,c),e(id,_8),p(t,F1,c),p(t,Hr,c),e(Hr,v8),e(Hr,Pf),e(Pf,b8),e(Hr,y8),p(t,L1,c),p(t,Oo,c),e(Oo,Br),e(Br,Df),f(Ms,Df,null),e(Oo,w8),e(Oo,Sf),e(Sf,T8),p(t,R1,c),p(t,Vr,c),e(Vr,E8),e(Vr,ld),e(ld,$8),e(Vr,x8),p(t,W1,c),p(t,me,c),e(me,k8),e(me,js),e(js,A8),e(me,P8),e(me,Hs),e(Hs,D8),e(me,S8),e(me,Bs),e(Bs,q8),e(me,O8),e(me,Vs),e(Vs,C8),e(me,I8),p(t,G1,c),p(t,Yr,c),e(Yr,U8),e(Yr,dd),e(dd,N8),e(Yr,z8),p(t,M1,c),p(t,cd,c),p(t,j1,c),p(t,Co,c),e(Co,Zr),e(Zr,qf),f(Ys,qf,null),e(Co,F8),e(Co,Of),e(Of,L8),p(t,H1,c),p(t,pd,c),e(pd,R8),p(t,B1,c),p(t,It,c),e(It,W8),e(It,Zs),e(Zs,G8),e(It,M8),e(It,Ks),e(Ks,j8),e(It,H8),p(t,V1,c),p(t,md,c),e(md,B8),p(t,Y1,c),f(Js,t,c),p(t,Z1,c),p(t,hd,c),e(hd,V8),p(t,K1,c),p(t,Ut,c),e(Ut,Y8),e(Ut,Cf),e(Cf,Z8),e(Ut,K8),e(Ut,If),e(If,J8),e(Ut,X8),p(t,J1,c),p(t,Io,c),e(Io,Kr),e(Kr,Uf),f(Xs,Uf,null),e(Io,Q8),e(Io,Nf),e(Nf,eP),p(t,X1,c),p(t,ud,c),e(ud,tP),p(t,Q1,c),p(t,Fe,c),e(Fe,oP),e(Fe,zf),e(zf,rP),e(Fe,aP),e(Fe,Ff),e(Ff,nP),e(Fe,sP),e(Fe,Lf),e(Lf,iP),e(Fe,lP),p(t,eb,c),p(t,Nt,c),e(Nt,dP),e(Nt,Rf),e(Rf,cP),e(Nt,pP),e(Nt,Wf),e(Wf,mP),e(Nt,hP),p(t,tb,c),f(Qs,t,c),p(t,ob,c),p(t,Jr,c),e(Jr,uP),e(Jr,ei),e(ei,fP),e(Jr,gP),p(t,rb,c),p(t,Uo,c),e(Uo,Xr),e(Xr,Gf),f(ti,Gf,null),e(Uo,_P),e(Uo,Mf),e(Mf,vP),p(t,ab,c),p(t,fd,c),e(fd,bP),p(t,nb,c),f(oi,t,c),p(t,sb,c),p(t,zt,c),e(zt,yP),e(zt,jf),e(jf,wP),e(zt,TP),e(zt,Hf),e(Hf,EP),e(zt,$P),p(t,ib,c),p(t,gd,c),e(gd,xP),p(t,lb,c),f(ri,t,c),p(t,db,c),p(t,_d,c),e(_d,kP),p(t,cb,c),p(t,Qr,c),e(Qr,AP),e(Qr,Bf),e(Bf,PP),e(Qr,DP),p(t,pb,c),p(t,rt,c),e(rt,Vf),e(Vf,SP),e(rt,qP),e(rt,Yf),e(Yf,OP),e(rt,CP),e(rt,Zf),e(Zf,IP),e(rt,UP),p(t,mb,c),p(t,vd,c),e(vd,NP),p(t,hb,c),f(ai,t,c),p(t,ub,c),p(t,bd,c),e(bd,zP),p(t,fb,c),p(t,Le,c),e(Le,FP),e(Le,Kf),e(Kf,LP),e(Le,RP),e(Le,Jf),e(Jf,WP),e(Le,GP),e(Le,Xf),e(Xf,MP),e(Le,jP),p(t,gb,c),p(t,No,c),e(No,ea),e(ea,Qf),f(ni,Qf,null),e(No,HP),e(No,eg),e(eg,BP),p(t,_b,c),p(t,Ft,c),e(Ft,VP),e(Ft,tg),e(tg
,YP),e(Ft,ZP),e(Ft,og),e(og,KP),e(Ft,JP),p(t,vb,c),p(t,yd,c),e(yd,XP),p(t,bb,c),p(t,wd,c),e(wd,QP),p(t,yb,c),p(t,ta,c),e(ta,eD),e(ta,rg),e(rg,tD),e(ta,oD),p(t,wb,c),f(si,t,c),p(t,Tb,c),p(t,M,c),e(M,rD),e(M,ag),e(ag,aD),e(M,nD),e(M,ng),e(ng,sD),e(M,iD),e(M,sg),e(sg,lD),e(M,dD),e(M,ig),e(ig,cD),e(M,pD),e(M,lg),e(lg,mD),e(M,hD),e(M,dg),e(dg,uD),e(M,fD),p(t,Eb,c),p(t,Td,c),e(Td,gD),p(t,$b,c),p(t,zo,c),e(zo,oa),e(oa,cg),f(ii,cg,null),e(zo,_D),e(zo,pg),e(pg,vD),p(t,xb,c),f(ra,t,c),p(t,kb,c),p(t,Re,c),e(Re,bD),e(Re,li),e(li,yD),e(Re,wD),e(Re,Ed),e(Ed,TD),e(Re,ED),e(Re,di),e(di,$D),e(Re,xD),p(t,Ab,c),p(t,We,c),e(We,mg),e(mg,kD),e(We,AD),e(We,hg),e(hg,PD),e(We,DD),e(We,ug),e(ug,SD),e(We,qD),e(We,fg),e(fg,OD),p(t,Pb,c),p(t,$d,c),e($d,CD),p(t,Db,c),p(t,ci,c),e(ci,gg),e(gg,ID),e(ci,UD),p(t,Sb,c),p(t,xd,c),e(xd,ND),p(t,qb,c),f(pi,t,c),p(t,Ob,c),p(t,Lt,c),e(Lt,zD),e(Lt,_g),e(_g,FD),e(Lt,LD),e(Lt,vg),e(vg,RD),e(Lt,WD),p(t,Cb,c),f(mi,t,c),p(t,Ib,c),p(t,Rt,c),e(Rt,GD),e(Rt,bg),e(bg,MD),e(Rt,jD),e(Rt,hi),e(hi,HD),e(Rt,BD),p(t,Ub,c),p(t,aa,c),e(aa,VD),e(aa,kd),e(kd,YD),e(aa,ZD),p(t,Nb,c),p(t,Ad,c),e(Ad,KD),p(t,zb,c),p(t,ui,c),e(ui,yg),e(yg,JD),e(ui,XD),p(t,Fb,c),f(fi,t,c),p(t,Lb,c),p(t,Pd,c),e(Pd,QD),p(t,Rb,c),f(gi,t,c),p(t,Wb,c),p(t,_i,c),e(_i,wg),e(wg,eS),e(_i,tS),p(t,Gb,c),f(vi,t,c),p(t,Mb,c),p(t,Dd,c),e(Dd,oS),p(t,jb,c),f(bi,t,c),p(t,Hb,c),p(t,Sd,c),e(Sd,rS),p(t,Bb,c),p(t,na,c),e(na,aS),e(na,yi),e(yi,nS),e(na,sS),p(t,Vb,c),p(t,wi,c),e(wi,Tg),e(Tg,iS),e(wi,lS),p(t,Yb,c),p(t,Wt,c),e(Wt,dS),e(Wt,Eg),e(Eg,cS),e(Wt,pS),e(Wt,$g),e($g,mS),e(Wt,hS),p(t,Zb,c),p(t,sa,c),e(sa,uS),e(sa,xg),e(xg,fS),e(sa,gS),p(t,Kb,c),f(Ti,t,c),p(t,Jb,c),p(t,qd,c),e(qd,_S),p(t,Xb,c),p(t,Ge,c),e(Ge,kg),e(kg,vS),e(Ge,bS),e(Ge,Ag),e(Ag,yS),e(Ge,wS),e(Ge,Ei),e(Ei,TS),e(Ei,Pg),e(Pg,ES),e(Ei,$S),e(Ge,xS),e(Ge,$i),e($i,kS),e($i,Dg),e(Dg,AS),e($i,PS),p(t,Qb,c),p(t,xi,c),e(xi,at),e(at,DS),e(at,Sg),e(Sg,SS),e(at,qS),e(at,qg),e(qg,OS),e(at,CS),e(at,Og),e(Og,IS),e(at,US),p(t,ey,c),p(t,ia,c),e(ia,NS),e(ia,Cg),e(Cg,zS),e(ia,FS),p(t,ty,c),f(ki,t,c),p(t,oy,c),p(t,Fo,c),e(Fo,Ig),e(Ig,LS),e(Fo,RS),e(Fo,Ug),e(Ug,WS),e(Fo,GS),p(t,ry,c),p(t,Gt,c),e(Gt,MS),e(Gt,Ng),e(Ng,jS),e(Gt,HS),e(Gt,zg),e(zg,BS),e(Gt,VS),p(t,ay,c),p(t,Od,c),e(Od,YS),p(t,ny,c),p(t,he,c),e(he,Fg),e(Fg,ZS),e(he,KS),e(he,Lg),e(Lg,JS),e(he,XS),e(he,Ai),e(Ai,QS),e(Ai,Rg),e(Rg,eq),e(Ai,tq),e(he,oq),e(he,Lo),e(Lo,rq),e(Lo,Wg),e(Wg,aq),e(Lo,nq),e(Lo,Gg),e(Gg,sq),e(Lo,iq),e(he,lq),e(he,Mg),e(Mg,dq),p(t,sy,c),p(t,Cd,c),e(Cd,cq),p(t,iy,c),p(t,la,c),e(la,Ro),e(Ro,pq),e(Ro,jg),e(jg,mq),e(Ro,hq),e(Ro,Hg),e(Hg,uq),e(Ro,fq),e(la,gq),e(la,$e),e($e,_q),e($e,Bg),e(Bg,vq),e($e,bq),e($e,Vg),e(Vg,yq),e($e,wq),e($e,Yg),e(Yg,Tq),e($e,Eq),e($e,Zg),e(Zg,$q),e($e,xq),p(t,ly,c),p(t,Wo,c),e(Wo,da),e(da,Kg),f(Pi,Kg,null),e(Wo,kq),e(Wo,Jg),e(Jg,Aq),p(t,dy,c),p(t,ca,c),e(ca,Pq),e(ca,Di),e(Di,Dq),e(ca,Sq),p(t,cy,c),p(t,Si,c),e(Si,Xg),e(Xg,qq),e(Si,Oq),p(t,py,c),p(t,qi,c),e(qi,Qg),e(Qg,Cq),e(qi,Iq),p(t,my,c),p(t,te,c),e(te,e_),e(e_,Oi),e(Oi,Uq),e(Oi,t_),e(t_,Nq),e(Oi,zq),e(te,Fq),e(te,Ci),e(Ci,Id),e(Id,o_),e(o_,Lq),e(Id,Rq),e(Ci,Wq),e(Ci,Go),e(Go,Ii),e(Ii,Gq),e(Ii,r_),e(r_,Mq),e(Ii,jq),e(Go,Hq),e(Go,Ui),e(Ui,Bq),e(Ui,a_),e(a_,Vq),e(Ui,Yq),e(Go,Zq),e(Go,Ni),e(Ni,Kq),e(Ni,n_),e(n_,Jq),e(Ni,Xq),e(te,Qq),e(te,s_),e(s_,Mo),e(Mo,e7),e(Mo,i_),e(i_,t7),e(Mo,o7),e(Mo,l_),e(l_,r7),e(Mo,a7),e(te,n7),e(te,d_),e(d_,nt),e(nt,s7),e(nt,c_),e(c_,i7),e(nt,l7),e(nt,p_),e(p_,d7),e(nt,c7),e(nt,m_),e(m_,p7),e(nt,m7),e(te,h7),e(te,h_),e(h_,jo),e(jo,u7),e(jo,u_),e(u_,f7),e(jo,g7),e(jo,f_),e(f_,_7),e(jo,v7),e(te,b7),e(te,zi),e(zi,g_),e(g_,y7),
e(zi,w7),e(zi,Fi),e(Fi,xe),e(xe,T7),e(xe,__),e(__,E7),e(xe,$7),e(xe,v_),e(v_,x7),e(xe,k7),e(xe,b_),e(b_,A7),e(xe,P7),e(xe,y_),e(y_,D7),e(xe,S7),e(Fi,q7),e(Fi,Li),e(Li,O7),e(Li,w_),e(w_,C7),e(Li,I7),p(t,hy,c),p(t,Ud,c),e(Ud,T_),e(T_,U7),p(t,uy,c),p(t,pa,c),e(pa,Ri),e(Ri,N7),e(Ri,Wi),e(Wi,z7),e(Ri,F7),e(pa,L7),e(pa,Ho),e(Ho,R7),e(Ho,Gi),e(Gi,W7),e(Ho,G7),e(Ho,E_),e(E_,M7),e(Ho,j7),p(t,fy,c),p(t,Bo,c),e(Bo,ma),e(ma,$_),f(Mi,$_,null),e(Bo,H7),e(Bo,x_),e(x_,B7),p(t,gy,c),p(t,Me,c),e(Me,V7),e(Me,k_),e(k_,Y7),e(Me,Z7),e(Me,ji),e(ji,K7),e(Me,J7),e(Me,Hi),e(Hi,X7),e(Me,Q7),p(t,_y,c),f(ha,t,c),p(t,vy,c),p(t,Nd,c),e(Nd,A_),e(A_,eO),p(t,by,c),p(t,Mt,c),e(Mt,P_),e(P_,tO),e(Mt,oO),e(Mt,D_),e(D_,rO),e(Mt,aO),e(Mt,S_),e(S_,nO),p(t,yy,c),p(t,Vo,c),e(Vo,q_),e(q_,sO),e(Vo,iO),e(Vo,Bi),e(Bi,lO),e(Vo,dO),p(t,wy,c),p(t,Yo,c),e(Yo,O_),e(O_,cO),e(Yo,pO),e(Yo,C_),e(C_,mO),e(Yo,hO),p(t,Ty,c),f(Vi,t,c),p(t,Ey,c),p(t,zd,c),e(zd,I_),e(I_,uO),p(t,$y,c),p(t,ua,c),e(ua,Yi),e(Yi,fO),e(Yi,U_),e(U_,gO),e(Yi,_O),e(ua,vO),e(ua,ke),e(ke,bO),e(ke,N_),e(N_,yO),e(ke,wO),e(ke,z_),e(z_,TO),e(ke,EO),e(ke,F_),e(F_,$O),e(ke,xO),e(ke,L_),e(L_,kO),e(ke,AO),p(t,xy,c),p(t,jt,c),e(jt,PO),e(jt,R_),e(R_,DO),e(jt,SO),e(jt,Zi),e(Zi,qO),e(jt,OO),p(t,ky,c),p(t,Fd,c),e(Fd,CO),p(t,Ay,c),p(t,w,c),e(w,IO),e(w,Ld),e(Ld,UO),e(w,W_),e(w,NO),e(w,Rd),e(Rd,zO),e(w,G_),e(w,FO),e(w,Wd),e(Wd,LO),e(w,M_),e(w,RO),e(w,Gd),e(Gd,WO),e(w,j_),e(w,GO),e(w,Md),e(Md,MO),e(w,H_),e(w,jO),e(w,jd),e(jd,HO),e(w,B_),e(w,BO),e(w,Hd),e(Hd,VO),e(w,V_),e(w,YO),e(w,Bd),e(Bd,ZO),e(w,Y_),e(w,KO),e(w,Vd),e(Vd,JO),e(w,Z_),e(w,XO),e(w,Yd),e(Yd,QO),e(w,K_),e(w,eC),e(w,Zd),e(Zd,tC),e(w,J_),e(w,oC),e(w,Kd),e(Kd,rC),e(w,X_),e(w,aC),e(w,Jd),e(Jd,nC),e(w,Q_),e(w,sC),e(w,Xd),e(Xd,iC),e(w,ev),e(w,lC),e(w,Qd),e(Qd,dC),e(w,tv),e(w,cC),e(w,ec),e(ec,pC),e(w,ov),e(w,mC),e(w,tc),e(tc,hC),e(w,rv),e(w,uC),e(w,oc),e(oc,fC),e(w,av),e(w,gC),e(w,rc),e(rc,_C),e(w,nv),e(w,vC),e(w,ac),e(ac,bC),e(w,sv),e(w,yC),e(w,nc),e(nc,wC),e(w,iv),e(w,TC),e(w,sc),e(sc,EC),e(w,lv),e(w,$C),e(w,ic),e(ic,xC),e(w,dv),e(w,kC),Py=!0},p(t,[c]){const Ki={};c&2&&(Ki.$$scope={dirty:c,ctx:t}),er.$set(Ki);const cv={};c&2&&(cv.$$scope={dirty:c,ctx:t}),pr.$set(cv);const pv={};c&2&&(pv.$$scope={dirty:c,ctx:t}),fr.$set(pv);const mv={};c&2&&(mv.$$scope={dirty:c,ctx:t}),vr.$set(mv);const Zo={};c&2&&(Zo.$$scope={dirty:c,ctx:t}),$r.$set(Zo);const hv={};c&2&&(hv.$$scope={dirty:c,ctx:t}),ra.$set(hv);const 
uv={};c&2&&(uv.$$scope={dirty:c,ctx:t}),ha.$set(uv)},i(t){Py||(g(A.$$.fragment,t),g(er.$$.fragment,t),g(La.$$.fragment,t),g(Ra.$$.fragment,t),g(Wa.$$.fragment,t),g(Ga.$$.fragment,t),g(ja.$$.fragment,t),g(Ba.$$.fragment,t),g(Va.$$.fragment,t),g(Ya.$$.fragment,t),g(Ka.$$.fragment,t),g(Xa.$$.fragment,t),g(Qa.$$.fragment,t),g(en.$$.fragment,t),g(on.$$.fragment,t),g(rn.$$.fragment,t),g(nn.$$.fragment,t),g(ln.$$.fragment,t),g(dn.$$.fragment,t),g(pn.$$.fragment,t),g(hn.$$.fragment,t),g(pr.$$.fragment,t),g(un.$$.fragment,t),g(gn.$$.fragment,t),g(_n.$$.fragment,t),g(bn.$$.fragment,t),g(wn.$$.fragment,t),g(fr.$$.fragment,t),g($n.$$.fragment,t),g(xn.$$.fragment,t),g(An.$$.fragment,t),g(Sn.$$.fragment,t),g(vr.$$.fragment,t),g(Cn.$$.fragment,t),g(In.$$.fragment,t),g(Un.$$.fragment,t),g(Nn.$$.fragment,t),g(Fn.$$.fragment,t),g(Wn.$$.fragment,t),g(Mn.$$.fragment,t),g(jn.$$.fragment,t),g(Bn.$$.fragment,t),g(Vn.$$.fragment,t),g(Yn.$$.fragment,t),g(Zn.$$.fragment,t),g(Kn.$$.fragment,t),g(Xn.$$.fragment,t),g($r.$$.fragment,t),g(ts.$$.fragment,t),g(os.$$.fragment,t),g(ns.$$.fragment,t),g(is.$$.fragment,t),g(ls.$$.fragment,t),g(ds.$$.fragment,t),g(ps.$$.fragment,t),g(ms.$$.fragment,t),g(hs.$$.fragment,t),g(us.$$.fragment,t),g(_s.$$.fragment,t),g(vs.$$.fragment,t),g(bs.$$.fragment,t),g(ys.$$.fragment,t),g(ws.$$.fragment,t),g(Ts.$$.fragment,t),g(Es.$$.fragment,t),g(xs.$$.fragment,t),g(As.$$.fragment,t),g(Ss.$$.fragment,t),g(qs.$$.fragment,t),g(Os.$$.fragment,t),g(Cs.$$.fragment,t),g(Us.$$.fragment,t),g(Ns.$$.fragment,t),g(zs.$$.fragment,t),g(Ls.$$.fragment,t),g(Ws.$$.fragment,t),g(Gs.$$.fragment,t),g(Ms.$$.fragment,t),g(Ys.$$.fragment,t),g(Js.$$.fragment,t),g(Xs.$$.fragment,t),g(Qs.$$.fragment,t),g(ti.$$.fragment,t),g(oi.$$.fragment,t),g(ri.$$.fragment,t),g(ai.$$.fragment,t),g(ni.$$.fragment,t),g(si.$$.fragment,t),g(ii.$$.fragment,t),g(ra.$$.fragment,t),g(pi.$$.fragment,t),g(mi.$$.fragment,t),g(fi.$$.fragment,t),g(gi.$$.fragment,t),g(vi.$$.fragment,t),g(bi.$$.fragment,t),g(Ti.$$.fragment,t),g(ki.$$.fragment,t),g(Pi.$$.fragment,t),g(Mi.$$.fragment,t),g(ha.$$.fragment,t),g(Vi.$$.fragment,t),Py=!0)},o(t){_(A.$$.fragment,t),_(er.$$.fragment,t),_(La.$$.fragment,t),_(Ra.$$.fragment,t),_(Wa.$$.fragment,t),_(Ga.$$.fragment,t),_(ja.$$.fragment,t),_(Ba.$$.fragment,t),_(Va.$$.fragment,t),_(Ya.$$.fragment,t),_(Ka.$$.fragment,t),_(Xa.$$.fragment,t),_(Qa.$$.fragment,t),_(en.$$.fragment,t),_(on.$$.fragment,t),_(rn.$$.fragment,t),_(nn.$$.fragment,t),_(ln.$$.fragment,t),_(dn.$$.fragment,t),_(pn.$$.fragment,t),_(hn.$$.fragment,t),_(pr.$$.fragment,t),_(un.$$.fragment,t),_(gn.$$.fragment,t),_(_n.$$.fragment,t),_(bn.$$.fragment,t),_(wn.$$.fragment,t),_(fr.$$.fragment,t),_($n.$$.fragment,t),_(xn.$$.fragment,t),_(An.$$.fragment,t),_(Sn.$$.fragment,t),_(vr.$$.fragment,t),_(Cn.$$.fragment,t),_(In.$$.fragment,t),_(Un.$$.fragment,t),_(Nn.$$.fragment,t),_(Fn.$$.fragment,t),_(Wn.$$.fragment,t),_(Mn.$$.fragment,t),_(jn.$$.fragment,t),_(Bn.$$.fragment,t),_(Vn.$$.fragment,t),_(Yn.$$.fragment,t),_(Zn.$$.fragment,t),_(Kn.$$.fragment,t),_(Xn.$$.fragment,t),_($r.$$.fragment,t),_(ts.$$.fragment,t),_(os.$$.fragment,t),_(ns.$$.fragment,t),_(is.$$.fragment,t),_(ls.$$.fragment,t),_(ds.$$.fragment,t),_(ps.$$.fragment,t),_(ms.$$.fragment,t),_(hs.$$.fragment,t),_(us.$$.fragment,t),_(_s.$$.fragment,t),_(vs.$$.fragment,t),_(bs.$$.fragment,t),_(ys.$$.fragment,t),_(ws.$$.fragment,t),_(Ts.$$.fragment,t),_(Es.$$.fragment,t),_(xs.$$.fragment,t),_(As.$$.fragment,t),_(Ss.$$.fragment,t),_(qs.$$.fragment,t),_(Os.$$.fragment,t),_(Cs.$$.fragment,t),_(Us.$$.fragment,t),_
(Ns.$$.fragment,t),_(zs.$$.fragment,t),_(Ls.$$.fragment,t),_(Ws.$$.fragment,t),_(Gs.$$.fragment,t),_(Ms.$$.fragment,t),_(Ys.$$.fragment,t),_(Js.$$.fragment,t),_(Xs.$$.fragment,t),_(Qs.$$.fragment,t),_(ti.$$.fragment,t),_(oi.$$.fragment,t),_(ri.$$.fragment,t),_(ai.$$.fragment,t),_(ni.$$.fragment,t),_(si.$$.fragment,t),_(ii.$$.fragment,t),_(ra.$$.fragment,t),_(pi.$$.fragment,t),_(mi.$$.fragment,t),_(fi.$$.fragment,t),_(gi.$$.fragment,t),_(vi.$$.fragment,t),_(bi.$$.fragment,t),_(Ti.$$.fragment,t),_(ki.$$.fragment,t),_(Pi.$$.fragment,t),_(Mi.$$.fragment,t),_(ha.$$.fragment,t),_(Vi.$$.fragment,t),Py=!1},d(t){o(T),t&&o(D),t&&o($),v(A),t&&o(oe),t&&o(G),t&&o(z),t&&o(I),t&&o(Ke),t&&o(Pe),t&&o(K),t&&o(B),t&&o(ro),t&&o(C),t&&o(yv),v(er,t),t&&o(wv),t&&o(tr),t&&o(Tv),v(La,t),t&&o(Ev),t&&o(ct),t&&o($v),t&&o(ao),v(Ra),t&&o(xv),t&&o(b),v(Wa),v(Ga),v(ja),v(Ba),v(Va),v(Ya),v(Ka),v(Xa),v(Qa),v(en),v(on),v(rn),v(nn),v(ln),v(dn),v(pn),v(hn),v(pr),v(un),v(gn),v(_n),v(bn),v(wn),v(fr),v($n),v(xn),v(An),v(Sn),v(vr),v(Cn),v(In),v(Un),v(Nn),v(Fn),v(Wn),v(Mn),v(jn),v(Bn),v(Vn),t&&o(kv),t&&o(go),v(Yn),t&&o(Av),t&&o(tt),v(Zn),v(Kn),v(Xn),v($r),t&&o(Pv),t&&o(wo),v(ts),t&&o(Dv),t&&o(F),v(os),v(ns),v(is),v(ls),v(ds),v(ps),v(ms),t&&o(Sv),t&&o(ko),v(hs),t&&o(qv),t&&o(ot),v(us),t&&o(Ov),t&&o(Po),v(_s),t&&o(Cv),t&&o(ce),t&&o(Iv),t&&o(Or),t&&o(Uv),t&&o(Cr),t&&o(Nv),t&&o(Ue),t&&o(zv),t&&o(Ir),t&&o(Fv),t&&o(Do),v(vs),t&&o(Lv),t&&o(Ne),t&&o(Rv),t&&o(At),t&&o(Wv),t&&o(zr),t&&o(Gv),t&&o(ze),t&&o(Mv),t&&o(ee),t&&o(jv),t&&o(Gl),t&&o(Hv),v(bs,t),t&&o(Bv),t&&o(Ml),t&&o(Vv),v(ys,t),t&&o(Yv),t&&o(jl),t&&o(Zv),v(ws,t),t&&o(Kv),t&&o(Hl),t&&o(Jv),t&&o(Bl),t&&o(Xv),v(Ts,t),t&&o(Qv),t&&o(Fr),t&&o(e1),t&&o(So),v(Es),t&&o(t1),t&&o(pe),t&&o(o1),t&&o(Pt),t&&o(r1),t&&o(qo),v(xs),t&&o(a1),t&&o(Yl),t&&o(n1),t&&o(Wr),t&&o(s1),v(As,t),t&&o(i1),t&&o(Dt),t&&o(l1),v(Ss,t),t&&o(d1),v(qs,t),t&&o(c1),t&&o(Gr),t&&o(p1),t&&o(Kl),t&&o(m1),t&&o(Jl),t&&o(h1),t&&o(Xl),t&&o(u1),t&&o(Mr),t&&o(f1),t&&o(Ql),t&&o(g1),v(Os,t),t&&o(_1),t&&o(St),t&&o(v1),t&&o(ed),t&&o(b1),v(Cs,t),t&&o(y1),t&&o(qt),t&&o(w1),t&&o(Ot),t&&o(T1),v(Us,t),t&&o(E1),t&&o(td),t&&o($1),v(Ns,t),t&&o(x1),t&&o(od),t&&o(k1),v(zs,t),t&&o(A1),t&&o(rd),t&&o(P1),t&&o(ad),t&&o(D1),t&&o(jr),t&&o(S1),t&&o(nd),t&&o(q1),v(Ls,t),t&&o(O1),t&&o(Rs),t&&o(C1),v(Ws,t),t&&o(I1),t&&o(Ct),t&&o(U1),t&&o(sd),t&&o(N1),v(Gs,t),t&&o(z1),t&&o(id),t&&o(F1),t&&o(Hr),t&&o(L1),t&&o(Oo),v(Ms),t&&o(R1),t&&o(Vr),t&&o(W1),t&&o(me),t&&o(G1),t&&o(Yr),t&&o(M1),t&&o(cd),t&&o(j1),t&&o(Co),v(Ys),t&&o(H1),t&&o(pd),t&&o(B1),t&&o(It),t&&o(V1),t&&o(md),t&&o(Y1),v(Js,t),t&&o(Z1),t&&o(hd),t&&o(K1),t&&o(Ut),t&&o(J1),t&&o(Io),v(Xs),t&&o(X1),t&&o(ud),t&&o(Q1),t&&o(Fe),t&&o(eb),t&&o(Nt),t&&o(tb),v(Qs,t),t&&o(ob),t&&o(Jr),t&&o(rb),t&&o(Uo),v(ti),t&&o(ab),t&&o(fd),t&&o(nb),v(oi,t),t&&o(sb),t&&o(zt),t&&o(ib),t&&o(gd),t&&o(lb),v(ri,t),t&&o(db),t&&o(_d),t&&o(cb),t&&o(Qr),t&&o(pb),t&&o(rt),t&&o(mb),t&&o(vd),t&&o(hb),v(ai,t),t&&o(ub),t&&o(bd),t&&o(fb),t&&o(Le),t&&o(gb),t&&o(No),v(ni),t&&o(_b),t&&o(Ft),t&&o(vb),t&&o(yd),t&&o(bb),t&&o(wd),t&&o(yb),t&&o(ta),t&&o(wb),v(si,t),t&&o(Tb),t&&o(M),t&&o(Eb),t&&o(Td),t&&o($b),t&&o(zo),v(ii),t&&o(xb),v(ra,t),t&&o(kb),t&&o(Re),t&&o(Ab),t&&o(We),t&&o(Pb),t&&o($d),t&&o(Db),t&&o(ci),t&&o(Sb),t&&o(xd),t&&o(qb),v(pi,t),t&&o(Ob),t&&o(Lt),t&&o(Cb),v(mi,t),t&&o(Ib),t&&o(Rt),t&&o(Ub),t&&o(aa),t&&o(Nb),t&&o(Ad),t&&o(zb),t&&o(ui),t&&o(Fb),v(fi,t),t&&o(Lb),t&&o(Pd),t&&o(Rb),v(gi,t),t&&o(Wb),t&&o(_i),t&&o(Gb),v(vi,t),t&&o(Mb),t&&o(Dd),t&&o(jb),v(bi,t),t&&o(Hb),t&&o(Sd),t&&o(Bb),t&&o(na),t&&o(Vb),t&&o(wi),t&&o(Yb),t&&o(Wt),t&&o(Zb),t&
&o(sa),t&&o(Kb),v(Ti,t),t&&o(Jb),t&&o(qd),t&&o(Xb),t&&o(Ge),t&&o(Qb),t&&o(xi),t&&o(ey),t&&o(ia),t&&o(ty),v(ki,t),t&&o(oy),t&&o(Fo),t&&o(ry),t&&o(Gt),t&&o(ay),t&&o(Od),t&&o(ny),t&&o(he),t&&o(sy),t&&o(Cd),t&&o(iy),t&&o(la),t&&o(ly),t&&o(Wo),v(Pi),t&&o(dy),t&&o(ca),t&&o(cy),t&&o(Si),t&&o(py),t&&o(qi),t&&o(my),t&&o(te),t&&o(hy),t&&o(Ud),t&&o(uy),t&&o(pa),t&&o(fy),t&&o(Bo),v(Mi),t&&o(gy),t&&o(Me),t&&o(_y),v(ha,t),t&&o(vy),t&&o(Nd),t&&o(by),t&&o(Mt),t&&o(yy),t&&o(Vo),t&&o(wy),t&&o(Yo),t&&o(Ty),v(Vi,t),t&&o(Ey),t&&o(zd),t&&o($y),t&&o(ua),t&&o(xy),t&&o(jt),t&&o(ky),t&&o(Fd),t&&o(Ay),t&&o(w)}}}const fM={local:"trainer",sections:[{local:"transformers.Trainer",title:"Trainer"},{local:"transformers.Seq2SeqTrainer",title:"Seq2SeqTrainer"},{local:"transformers.TrainingArguments",title:"TrainingArguments"},{local:"transformers.Seq2SeqTrainingArguments",title:"Seq2SeqTrainingArguments"},{local:"checkpoints",title:"Checkpoints"},{local:"logging",title:"Logging"},{local:"randomness",title:"Randomness"},{local:"specific-gpus-selection",title:"Specific GPUs Selection"},{local:"trainer-integrations",sections:[{local:"cuda-extension-installation-notes",sections:[{local:"possible-problem-1",title:"Possible problem #1"},{local:"possible-problem-2",title:"Possible problem #2"},{local:"possible-problem-3",title:"Possible problem #3"}],title:"CUDA Extension Installation Notes"},{local:"fairscale",title:"FairScale"},{local:"pytorch-fully-sharded-data-parallel",title:"PyTorch Fully Sharded Data parallel"},{local:"using-trainer-for-accelerated-pytorch-training-on-mac",title:"Using Trainer for accelerated PyTorch Training on Mac "}],title:"Trainer Integrations"}],title:"Trainer"};function gM(Z){return aM(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class EM extends eM{constructor(T){super();tM(this,T,gM,uM,oM,{})}}export{EM as default,fM as metadata};
16
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/onnx.mdx-hf-doc-builder.js
import{S as Do,i as Mo,s as qo,e as n,k as s,w as c,t as f,M as Ao,c as o,d as t,m as i,a,x as h,h as m,b as l,G as r,g as p,y as g,L as Io,q as u,o as _,B as v,v as Lo}from"../../chunks/vendor-hf-doc-builder.js";import{D as b}from"../../chunks/Docstring-hf-doc-builder.js";import{I as Te}from"../../chunks/IconCopyLink-hf-doc-builder.js";function Wo(Wn){let C,br,P,A,ze,oe,it,He,lt,yr,I,ft,Ge,mt,dt,wr,L,pt,Fe,ct,ht,Er,N,W,Ue,ae,gt,Be,ut,kr,Se,_t,Or,O,De,vt,Me,xt,$t,qe,bt,Ae,yt,wt,Ie,Et,Le,kt,Cr,T,X,Re,se,Ot,je,Ct,Pr,$,ie,Pt,Je,Nt,Tt,V,le,Ft,Ke,St,Dt,z,fe,Mt,Qe,qt,At,H,me,It,Ye,Lt,Wt,G,de,Xt,Ze,Vt,Nr,F,U,er,pe,zt,rr,Ht,Tr,k,ce,Gt,B,he,Ut,tr,Bt,Rt,R,ge,jt,ue,Jt,nr,Kt,Qt,Fr,S,j,or,_e,Yt,ar,Zt,Sr,ve,xe,Dr,D,J,sr,$e,en,ir,rn,Mr,K,tn,lr,nn,on,qr,M,Q,fr,be,an,mr,sn,Ar,x,ye,ln,Y,we,fn,dr,mn,dn,w,Ee,pn,pr,cn,hn,cr,gn,un,q,ke,_n,hr,vn,xn,$n,gr,bn,yn,ur,wn,En,Z,Oe,kn,_r,On,Cn,ee,Ce,Pn,vr,Nn,Tn,re,Pe,Fn,xr,Sn,Dn,te,Ne,Mn,$r,qn,Ir;return oe=new Te({}),ae=new Te({}),se=new Te({}),ie=new b({props:{name:"class transformers.onnx.OnnxConfig",anchor:"transformers.onnx.OnnxConfig",parameters:[{name:"config",val:": PretrainedConfig"},{name:"task",val:": str = 'default'"},{name:"patching_specs",val:": typing.List[transformers.onnx.config.PatchingSpec] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L67"}}),le=new b({props:{name:"flatten_output_collection_property",anchor:"transformers.onnx.OnnxConfig.flatten_output_collection_property",parameters:[{name:"name",val:": str"},{name:"field",val:": typing.Iterable[typing.Any]"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L362",returnDescription:` <p>Outputs with flattened structure and key mapping this new structure.</p> `,returnType:` <p>(Dict[str, Any])</p> `}}),fe=new b({props:{name:"from_model_config",anchor:"transformers.onnx.OnnxConfig.from_model_config",parameters:[{name:"config",val:": PretrainedConfig"},{name:"task",val:": str = 'default'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L124",returnDescription:` <p>OnnxConfig for this model</p> `}}),me=new b({props:{name:"generate_dummy_inputs",anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs",parameters:[{name:"preprocessor",val:": typing.Union[ForwardRef('PreTrainedTokenizerBase'), ForwardRef('FeatureExtractionMixin')]"},{name:"batch_size",val:": int = -1"},{name:"seq_length",val:": int = -1"},{name:"num_choices",val:": int = -1"},{name:"is_pair",val:": bool = False"},{name:"framework",val:": typing.Optional[transformers.utils.generic.TensorType] = None"},{name:"num_channels",val:": int = 3"},{name:"image_width",val:": int = 40"},{name:"image_height",val:": int = 40"},{name:"tokenizer",val:": PreTrainedTokenizerBase = None"}],parametersDescription:[{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The batch size to export the model for (-1 means dynamic axis).`,name:"batch_size"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.num_choices",description:`<strong>num_choices</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The number of candidate answers provided for multiple choice task (-1 means dynamic axis).`,name:"num_choices"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.seq_length",description:`<strong>seq_length</strong> (<code>int</code>, 
<em>optional</em>, defaults to -1) &#x2014; The sequence length to export the model for (-1 means dynamic axis).`,name:"seq_length"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.is_pair",description:`<strong>is_pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Indicate if the input is a pair (sentence 1, sentence 2)`,name:"is_pair"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.framework",description:`<strong>framework</strong> (<code>TensorType</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.`,name:"framework"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.num_channels",description:`<strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of channels of the generated images.`,name:"num_channels"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.image_width",description:`<strong>image_width</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The width of the generated images.`,name:"image_width"},{anchor:"transformers.onnx.OnnxConfig.generate_dummy_inputs.image_height",description:`<strong>image_height</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The height of the generated images.`,name:"image_height"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L264",returnDescription:` <p>Mapping[str, Tensor] holding the kwargs to provide to the model\u2019s forward function</p> `}}),de=new b({props:{name:"use_external_data_format",anchor:"transformers.onnx.OnnxConfig.use_external_data_format",parameters:[{name:"num_parameters",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L238",returnDescription:` <p>True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise</p> `}}),pe=new Te({}),ce=new b({props:{name:"class transformers.onnx.OnnxConfigWithPast",anchor:"transformers.onnx.OnnxConfigWithPast",parameters:[{name:"config",val:": PretrainedConfig"},{name:"task",val:": str = 'default'"},{name:"patching_specs",val:": typing.List[transformers.onnx.config.PatchingSpec] = None"},{name:"use_past",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L381"}}),he=new b({props:{name:"fill_with_past_key_values_",anchor:"transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_",parameters:[{name:"inputs_or_outputs",val:": typing.Mapping[str, typing.Mapping[int, str]]"},{name:"direction",val:": str"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L489"}}),ge=new b({props:{name:"with_past",anchor:"transformers.onnx.OnnxConfigWithPast.with_past",parameters:[{name:"config",val:": PretrainedConfig"},{name:"task",val:": str = 'default'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L392",returnDescription:` <p>OnnxConfig with <code>.use_past = True</code></p> `}}),_e=new Te({}),xe=new b({props:{name:"class transformers.onnx.OnnxSeq2SeqConfigWithPast",anchor:"transformers.onnx.OnnxSeq2SeqConfigWithPast",parameters:[{name:"config",val:": PretrainedConfig"},{name:"task",val:": str = 'default'"},{name:"patching_specs",val:": typing.List[transformers.onnx.config.PatchingSpec] = None"},{name:"use_past",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L522"}}),$e=new Te({}),be=new Te({}),ye=new b({props:{name:"class transformers.onnx.FeaturesManager",anchor:"transformers.onnx.FeaturesManager",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L83"}}),we=new b({props:{name:"check_supported_model_or_raise",anchor:"transformers.onnx.FeaturesManager.check_supported_model_or_raise",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"feature",val:": str = 'default'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L670",returnDescription:` <p>(str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties.</p> `}}),Ee=new b({props:{name:"determine_framework",anchor:"transformers.onnx.FeaturesManager.determine_framework",parameters:[{name:"model",val:": str"},{name:"framework",val:": str = None"}],parametersDescription:[{anchor:"transformers.onnx.FeaturesManager.determine_framework.model",description:`<strong>model</strong> (<code>str</code>) &#x2014; The name of the model to export.`,name:"model"},{anchor:"transformers.onnx.FeaturesManager.determine_framework.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The framework to use for the export. See above for priority if none provided.`,name:"framework"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L587",returnDescription:` <p>The framework to use for the export.</p> `}}),Oe=new b({props:{name:"get_config",anchor:"transformers.onnx.FeaturesManager.get_config",parameters:[{name:"model_type",val:": str"},{name:"feature",val:": str"}],parametersDescription:[{anchor:"transformers.onnx.FeaturesManager.get_config.model_type",description:`<strong>model_type</strong> (<code>str</code>) &#x2014; The model type to retrieve the config for.`,name:"model_type"},{anchor:"transformers.onnx.FeaturesManager.get_config.feature",description:`<strong>feature</strong> (<code>str</code>) &#x2014; The feature to retrieve the config for.`,name:"feature"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L695",returnDescription:` <p>config for the combination</p> `,returnType:` <p><code>OnnxConfig</code></p> `}}),Ce=new b({props:{name:"get_model_class_for_feature",anchor:"transformers.onnx.FeaturesManager.get_model_class_for_feature",parameters:[{name:"feature",val:": str"},{name:"framework",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.onnx.FeaturesManager.get_model_class_for_feature.feature",description:`<strong>feature</strong> (<code>str</code>) &#x2014; The feature required.`,name:"feature"},{anchor:"transformers.onnx.FeaturesManager.get_model_class_for_feature.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pt&quot;</code>) &#x2014; The framework to use for the export.`,name:"framework"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L561",returnDescription:` <p>The AutoModel class corresponding to the feature.</p> `}}),Pe=new b({props:{name:"get_model_from_feature",anchor:"transformers.onnx.FeaturesManager.get_model_from_feature",parameters:[{name:"feature",val:": 
str"},{name:"model",val:": str"},{name:"framework",val:": str = None"},{name:"cache_dir",val:": str = None"}],parametersDescription:[{anchor:"transformers.onnx.FeaturesManager.get_model_from_feature.feature",description:`<strong>feature</strong> (<code>str</code>) &#x2014; The feature required.`,name:"feature"},{anchor:"transformers.onnx.FeaturesManager.get_model_from_feature.model",description:`<strong>model</strong> (<code>str</code>) &#x2014; The name of the model to export.`,name:"model"},{anchor:"transformers.onnx.FeaturesManager.get_model_from_feature.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The framework to use for the export. See <code>FeaturesManager.determine_framework</code> for the priority should none be provided.`,name:"framework"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L637",returnDescription:` <p>The instance of the model.</p> `}}),Ne=new b({props:{name:"get_supported_features_for_model_type",anchor:"transformers.onnx.FeaturesManager.get_supported_features_for_model_type",parameters:[{name:"model_type",val:": str"},{name:"model_name",val:": typing.Optional[str] = None"}],parametersDescription:[{anchor:"transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_type",description:`<strong>model_type</strong> (<code>str</code>) &#x2014; The model type to retrieve the supported features for.`,name:"model_type"},{anchor:"transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_name",description:`<strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name attribute of the model object, only used for the exception message.`,name:"model_name"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L516",returnDescription:` <p>The dictionary mapping each feature to a corresponding OnnxConfig constructor.</p> `}}),{c(){C=n("meta"),br=s(),P=n("h1"),A=n("a"),ze=n("span"),c(oe.$$.fragment),it=s(),He=n("span"),lt=f("Exporting \u{1F917} Transformers models to ONNX"),yr=s(),I=n("p"),ft=f("\u{1F917} Transformers provides a "),Ge=n("code"),mt=f("transformers.onnx"),dt=f(` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects.`),wr=s(),L=n("p"),pt=f("See the "),Fe=n("a"),ct=f("guide"),ht=f(` on exporting \u{1F917} Transformers models for more details.`),Er=s(),N=n("h2"),W=n("a"),Ue=n("span"),c(ae.$$.fragment),gt=s(),Be=n("span"),ut=f("ONNX Configurations"),kr=s(),Se=n("p"),_t=f(`We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:`),Or=s(),O=n("ul"),De=n("li"),vt=f("Encoder-based models inherit from "),Me=n("a"),xt=f("OnnxConfig"),$t=s(),qe=n("li"),bt=f("Decoder-based models inherit from "),Ae=n("a"),yt=f("OnnxConfigWithPast"),wt=s(),Ie=n("li"),Et=f("Encoder-decoder models inherit from "),Le=n("a"),kt=f("OnnxSeq2SeqConfigWithPast"),Cr=s(),T=n("h3"),X=n("a"),Re=n("span"),c(se.$$.fragment),Ot=s(),je=n("span"),Ct=f("OnnxConfig"),Pr=s(),$=n("div"),c(ie.$$.fragment),Pt=s(),Je=n("p"),Nt=f("Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format."),Tt=s(),V=n("div"),c(le.$$.fragment),Ft=s(),Ke=n("p"),St=f(`Flatten any potential nested structure expanding the name of the field with the index of the element within the 
structure.`),Dt=s(),z=n("div"),c(fe.$$.fragment),Mt=s(),Qe=n("p"),qt=f("Instantiate a OnnxConfig for a specific model"),At=s(),H=n("div"),c(me.$$.fragment),It=s(),Ye=n("p"),Lt=f("Generate inputs to provide to the ONNX exporter for the specific framework"),Wt=s(),G=n("div"),c(de.$$.fragment),Xt=s(),Ze=n("p"),Vt=f("Flag indicating if the model requires using external data format"),Nr=s(),F=n("h3"),U=n("a"),er=n("span"),c(pe.$$.fragment),zt=s(),rr=n("span"),Ht=f("OnnxConfigWithPast"),Tr=s(),k=n("div"),c(ce.$$.fragment),Gt=s(),B=n("div"),c(he.$$.fragment),Ut=s(),tr=n("p"),Bt=f("Fill the input_or_outputs mapping with past_key_values dynamic axes considering."),Rt=s(),R=n("div"),c(ge.$$.fragment),jt=s(),ue=n("p"),Jt=f("Instantiate a OnnxConfig with "),nr=n("code"),Kt=f("use_past"),Qt=f(" attribute set to True"),Fr=s(),S=n("h3"),j=n("a"),or=n("span"),c(_e.$$.fragment),Yt=s(),ar=n("span"),Zt=f("OnnxSeq2SeqConfigWithPast"),Sr=s(),ve=n("div"),c(xe.$$.fragment),Dr=s(),D=n("h2"),J=n("a"),sr=n("span"),c($e.$$.fragment),en=s(),ir=n("span"),rn=f("ONNX Features"),Mr=s(),K=n("p"),tn=f("Each ONNX configuration is associated with a set of "),lr=n("em"),nn=f("features"),on=f(` that enable you to export models for different types of topologies or tasks.`),qr=s(),M=n("h3"),Q=n("a"),fr=n("span"),c(be.$$.fragment),an=s(),mr=n("span"),sn=f("FeaturesManager"),Ar=s(),x=n("div"),c(ye.$$.fragment),ln=s(),Y=n("div"),c(we.$$.fragment),fn=s(),dr=n("p"),mn=f("Check whether or not the model has the requested features."),dn=s(),w=n("div"),c(Ee.$$.fragment),pn=s(),pr=n("p"),cn=f("Determines the framework to use for the export."),hn=s(),cr=n("p"),gn=f("The priority is in the following order:"),un=s(),q=n("ol"),ke=n("li"),_n=f("User input via "),hr=n("code"),vn=f("framework"),xn=f("."),$n=s(),gr=n("li"),bn=f("If local checkpoint is provided, use the same framework as the checkpoint."),yn=s(),ur=n("li"),wn=f("Available framework in environment, with priority given to PyTorch"),En=s(),Z=n("div"),c(Oe.$$.fragment),kn=s(),_r=n("p"),On=f("Gets the OnnxConfig for a model_type and feature combination."),Cn=s(),ee=n("div"),c(Ce.$$.fragment),Pn=s(),vr=n("p"),Nn=f("Attempts to retrieve an AutoModel class from a feature name."),Tn=s(),re=n("div"),c(Pe.$$.fragment),Fn=s(),xr=n("p"),Sn=f("Attempts to retrieve a model from a model\u2019s name and the feature to be enabled."),Dn=s(),te=n("div"),c(Ne.$$.fragment),Mn=s(),$r=n("p"),qn=f("Tries to retrieve the feature -> OnnxConfig constructor map from the model type."),this.h()},l(e){const d=Ao('[data-svelte="svelte-1phssyn"]',document.head);C=o(d,"META",{name:!0,content:!0}),d.forEach(t),br=i(e),P=o(e,"H1",{class:!0});var Lr=a(P);A=o(Lr,"A",{id:!0,class:!0,href:!0});var Xn=a(A);ze=o(Xn,"SPAN",{});var Vn=a(ze);h(oe.$$.fragment,Vn),Vn.forEach(t),Xn.forEach(t),it=i(Lr),He=o(Lr,"SPAN",{});var zn=a(He);lt=m(zn,"Exporting \u{1F917} Transformers models to ONNX"),zn.forEach(t),Lr.forEach(t),yr=i(e),I=o(e,"P",{});var Wr=a(I);ft=m(Wr,"\u{1F917} Transformers provides a "),Ge=o(Wr,"CODE",{});var Hn=a(Ge);mt=m(Hn,"transformers.onnx"),Hn.forEach(t),dt=m(Wr,` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects.`),Wr.forEach(t),wr=i(e),L=o(e,"P",{});var Xr=a(L);pt=m(Xr,"See the "),Fe=o(Xr,"A",{href:!0});var Gn=a(Fe);ct=m(Gn,"guide"),Gn.forEach(t),ht=m(Xr,` on exporting \u{1F917} Transformers models for more details.`),Xr.forEach(t),Er=i(e),N=o(e,"H2",{class:!0});var Vr=a(N);W=o(Vr,"A",{id:!0,class:!0,href:!0});var Un=a(W);Ue=o(Un,"SPAN",{});var 
Bn=a(Ue);h(ae.$$.fragment,Bn),Bn.forEach(t),Un.forEach(t),gt=i(Vr),Be=o(Vr,"SPAN",{});var Rn=a(Be);ut=m(Rn,"ONNX Configurations"),Rn.forEach(t),Vr.forEach(t),kr=i(e),Se=o(e,"P",{});var jn=a(Se);_t=m(jn,`We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:`),jn.forEach(t),Or=i(e),O=o(e,"UL",{});var We=a(O);De=o(We,"LI",{});var An=a(De);vt=m(An,"Encoder-based models inherit from "),Me=o(An,"A",{href:!0});var Jn=a(Me);xt=m(Jn,"OnnxConfig"),Jn.forEach(t),An.forEach(t),$t=i(We),qe=o(We,"LI",{});var In=a(qe);bt=m(In,"Decoder-based models inherit from "),Ae=o(In,"A",{href:!0});var Kn=a(Ae);yt=m(Kn,"OnnxConfigWithPast"),Kn.forEach(t),In.forEach(t),wt=i(We),Ie=o(We,"LI",{});var Ln=a(Ie);Et=m(Ln,"Encoder-decoder models inherit from "),Le=o(Ln,"A",{href:!0});var Qn=a(Le);kt=m(Qn,"OnnxSeq2SeqConfigWithPast"),Qn.forEach(t),Ln.forEach(t),We.forEach(t),Cr=i(e),T=o(e,"H3",{class:!0});var zr=a(T);X=o(zr,"A",{id:!0,class:!0,href:!0});var Yn=a(X);Re=o(Yn,"SPAN",{});var Zn=a(Re);h(se.$$.fragment,Zn),Zn.forEach(t),Yn.forEach(t),Ot=i(zr),je=o(zr,"SPAN",{});var eo=a(je);Ct=m(eo,"OnnxConfig"),eo.forEach(t),zr.forEach(t),Pr=i(e),$=o(e,"DIV",{class:!0});var E=a($);h(ie.$$.fragment,E),Pt=i(E),Je=o(E,"P",{});var ro=a(Je);Nt=m(ro,"Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format."),ro.forEach(t),Tt=i(E),V=o(E,"DIV",{class:!0});var Hr=a(V);h(le.$$.fragment,Hr),Ft=i(Hr),Ke=o(Hr,"P",{});var to=a(Ke);St=m(to,`Flatten any potential nested structure expanding the name of the field with the index of the element within the structure.`),to.forEach(t),Hr.forEach(t),Dt=i(E),z=o(E,"DIV",{class:!0});var Gr=a(z);h(fe.$$.fragment,Gr),Mt=i(Gr),Qe=o(Gr,"P",{});var no=a(Qe);qt=m(no,"Instantiate a OnnxConfig for a specific model"),no.forEach(t),Gr.forEach(t),At=i(E),H=o(E,"DIV",{class:!0});var Ur=a(H);h(me.$$.fragment,Ur),It=i(Ur),Ye=o(Ur,"P",{});var oo=a(Ye);Lt=m(oo,"Generate inputs to provide to the ONNX exporter for the specific framework"),oo.forEach(t),Ur.forEach(t),Wt=i(E),G=o(E,"DIV",{class:!0});var Br=a(G);h(de.$$.fragment,Br),Xt=i(Br),Ze=o(Br,"P",{});var ao=a(Ze);Vt=m(ao,"Flag indicating if the model requires using external data format"),ao.forEach(t),Br.forEach(t),E.forEach(t),Nr=i(e),F=o(e,"H3",{class:!0});var Rr=a(F);U=o(Rr,"A",{id:!0,class:!0,href:!0});var so=a(U);er=o(so,"SPAN",{});var io=a(er);h(pe.$$.fragment,io),io.forEach(t),so.forEach(t),zt=i(Rr),rr=o(Rr,"SPAN",{});var lo=a(rr);Ht=m(lo,"OnnxConfigWithPast"),lo.forEach(t),Rr.forEach(t),Tr=i(e),k=o(e,"DIV",{class:!0});var Xe=a(k);h(ce.$$.fragment,Xe),Gt=i(Xe),B=o(Xe,"DIV",{class:!0});var jr=a(B);h(he.$$.fragment,jr),Ut=i(jr),tr=o(jr,"P",{});var fo=a(tr);Bt=m(fo,"Fill the input_or_outputs mapping with past_key_values dynamic axes considering."),fo.forEach(t),jr.forEach(t),Rt=i(Xe),R=o(Xe,"DIV",{class:!0});var Jr=a(R);h(ge.$$.fragment,Jr),jt=i(Jr),ue=o(Jr,"P",{});var Kr=a(ue);Jt=m(Kr,"Instantiate a OnnxConfig with "),nr=o(Kr,"CODE",{});var mo=a(nr);Kt=m(mo,"use_past"),mo.forEach(t),Qt=m(Kr," attribute set to True"),Kr.forEach(t),Jr.forEach(t),Xe.forEach(t),Fr=i(e),S=o(e,"H3",{class:!0});var Qr=a(S);j=o(Qr,"A",{id:!0,class:!0,href:!0});var po=a(j);or=o(po,"SPAN",{});var co=a(or);h(_e.$$.fragment,co),co.forEach(t),po.forEach(t),Yt=i(Qr),ar=o(Qr,"SPAN",{});var ho=a(ar);Zt=m(ho,"OnnxSeq2SeqConfigWithPast"),ho.forEach(t),Qr.forEach(t),Sr=i(e),ve=o(e,"DIV",{class:!0});var 
go=a(ve);h(xe.$$.fragment,go),go.forEach(t),Dr=i(e),D=o(e,"H2",{class:!0});var Yr=a(D);J=o(Yr,"A",{id:!0,class:!0,href:!0});var uo=a(J);sr=o(uo,"SPAN",{});var _o=a(sr);h($e.$$.fragment,_o),_o.forEach(t),uo.forEach(t),en=i(Yr),ir=o(Yr,"SPAN",{});var vo=a(ir);rn=m(vo,"ONNX Features"),vo.forEach(t),Yr.forEach(t),Mr=i(e),K=o(e,"P",{});var Zr=a(K);tn=m(Zr,"Each ONNX configuration is associated with a set of "),lr=o(Zr,"EM",{});var xo=a(lr);nn=m(xo,"features"),xo.forEach(t),on=m(Zr,` that enable you to export models for different types of topologies or tasks.`),Zr.forEach(t),qr=i(e),M=o(e,"H3",{class:!0});var et=a(M);Q=o(et,"A",{id:!0,class:!0,href:!0});var $o=a(Q);fr=o($o,"SPAN",{});var bo=a(fr);h(be.$$.fragment,bo),bo.forEach(t),$o.forEach(t),an=i(et),mr=o(et,"SPAN",{});var yo=a(mr);sn=m(yo,"FeaturesManager"),yo.forEach(t),et.forEach(t),Ar=i(e),x=o(e,"DIV",{class:!0});var y=a(x);h(ye.$$.fragment,y),ln=i(y),Y=o(y,"DIV",{class:!0});var rt=a(Y);h(we.$$.fragment,rt),fn=i(rt),dr=o(rt,"P",{});var wo=a(dr);mn=m(wo,"Check whether or not the model has the requested features."),wo.forEach(t),rt.forEach(t),dn=i(y),w=o(y,"DIV",{class:!0});var ne=a(w);h(Ee.$$.fragment,ne),pn=i(ne),pr=o(ne,"P",{});var Eo=a(pr);cn=m(Eo,"Determines the framework to use for the export."),Eo.forEach(t),hn=i(ne),cr=o(ne,"P",{});var ko=a(cr);gn=m(ko,"The priority is in the following order:"),ko.forEach(t),un=i(ne),q=o(ne,"OL",{});var Ve=a(q);ke=o(Ve,"LI",{});var tt=a(ke);_n=m(tt,"User input via "),hr=o(tt,"CODE",{});var Oo=a(hr);vn=m(Oo,"framework"),Oo.forEach(t),xn=m(tt,"."),tt.forEach(t),$n=i(Ve),gr=o(Ve,"LI",{});var Co=a(gr);bn=m(Co,"If local checkpoint is provided, use the same framework as the checkpoint."),Co.forEach(t),yn=i(Ve),ur=o(Ve,"LI",{});var Po=a(ur);wn=m(Po,"Available framework in environment, with priority given to PyTorch"),Po.forEach(t),Ve.forEach(t),ne.forEach(t),En=i(y),Z=o(y,"DIV",{class:!0});var nt=a(Z);h(Oe.$$.fragment,nt),kn=i(nt),_r=o(nt,"P",{});var No=a(_r);On=m(No,"Gets the OnnxConfig for a model_type and feature combination."),No.forEach(t),nt.forEach(t),Cn=i(y),ee=o(y,"DIV",{class:!0});var ot=a(ee);h(Ce.$$.fragment,ot),Pn=i(ot),vr=o(ot,"P",{});var To=a(vr);Nn=m(To,"Attempts to retrieve an AutoModel class from a feature name."),To.forEach(t),ot.forEach(t),Tn=i(y),re=o(y,"DIV",{class:!0});var at=a(re);h(Pe.$$.fragment,at),Fn=i(at),xr=o(at,"P",{});var Fo=a(xr);Sn=m(Fo,"Attempts to retrieve a model from a model\u2019s name and the feature to be enabled."),Fo.forEach(t),at.forEach(t),Dn=i(y),te=o(y,"DIV",{class:!0});var st=a(te);h(Ne.$$.fragment,st),Mn=i(st),$r=o(st,"P",{});var So=a($r);qn=m(So,"Tries to retrieve the feature -> OnnxConfig constructor map from the model type."),So.forEach(t),st.forEach(t),y.forEach(t),this.h()},h(){l(C,"name","hf:doc:metadata"),l(C,"content",JSON.stringify(Xo)),l(A,"id","exporting-transformers-models-to-onnx"),l(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(A,"href","#exporting-transformers-models-to-onnx"),l(P,"class","relative group"),l(Fe,"href","../serialization"),l(W,"id","onnx-configurations"),l(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(W,"href","#onnx-configurations"),l(N,"class","relative 
group"),l(Me,"href","/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxConfig"),l(Ae,"href","/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxConfigWithPast"),l(Le,"href","/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxSeq2SeqConfigWithPast"),l(X,"id","transformers.onnx.OnnxConfig"),l(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(X,"href","#transformers.onnx.OnnxConfig"),l(T,"class","relative group"),l(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(U,"id","transformers.onnx.OnnxConfigWithPast"),l(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(U,"href","#transformers.onnx.OnnxConfigWithPast"),l(F,"class","relative group"),l(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(j,"id","transformers.onnx.OnnxSeq2SeqConfigWithPast"),l(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(j,"href","#transformers.onnx.OnnxSeq2SeqConfigWithPast"),l(S,"class","relative group"),l(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(J,"id","onnx-features"),l(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(J,"href","#onnx-features"),l(D,"class","relative group"),l(Q,"id","transformers.onnx.FeaturesManager"),l(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Q,"href","#transformers.onnx.FeaturesManager"),l(M,"class","relative group"),l(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,d){r(document.head,C),p(e,br,d),p(e,P,d),r(P,A),r(A,ze),g(oe,ze,null),r(P,it),r(P,He),r(He,lt),p(e,yr,d),p(e,I,d),r(I,ft),r(I,Ge),r(Ge,mt),r(I,dt),p(e,wr,d),p(e,L,d),r(L,pt),r(L,Fe),r(Fe,ct),r(L,ht),p(e,Er,d),p(e,N,d),r(N,W),r(W,Ue),g(ae,Ue,null),r(N,gt),r(N,Be),r(Be,ut),p(e,kr,d),p(e,Se,d),r(Se,_t),p(e,Or,d),p(e,O,d),r(O,De),r(De,vt),r(De,Me),r(Me,xt),r(O,$t),r(O,qe),r(qe,bt),r(qe,Ae),r(Ae,yt),r(O,wt),r(O,Ie),r(Ie,Et),r(Ie,Le),r(Le,kt),p(e,Cr,d),p(e,T,d),r(T,X),r(X,Re),g(se,Re,null),r(T,Ot),r(T,je),r(je,Ct),p(e,Pr,d),p(e,$,d),g(ie,$,null),r($,Pt),r($,Je),r(Je,Nt),r($,Tt),r($,V),g(le,V,null),r(V,Ft),r(V,Ke),r(Ke,St),r($,Dt),r($,z),g(fe,z,null),r(z,Mt),r(z,Qe),r(Qe,qt),r($,At),r($,H),g(me,H,null),r(H,It),r(H,Ye),r(Ye,Lt),r($,Wt),r($,G),g(de,G,null),r(G,Xt),r(G,Ze),r(Ze,Vt),p(e,Nr,d),p(e,F,d),r(F,U),r(U,er),g(pe,er,null),r(F,zt),r(F,rr),r(rr,Ht),p(e,Tr,d),p(e,k,d),g(ce,k,null),r(k,Gt),r(k,B),g(he,B,null),r(B,Ut),r(B,tr),r(tr,Bt),r(k,Rt),r(k,R),g(ge,R,null),r(R,jt),r(R,ue),r(ue,Jt),r(ue,nr),r(nr,Kt),r(ue,Qt),p(e,Fr,d),p(e,S,d),r(S,j),r(j,or),g(_e,or,null),r(S,Yt),r(S,ar),r(ar,Zt),p(e,Sr,d),p(e,ve,d),g(xe,ve,null),p(e,Dr,d),p(e,D,d),r(D,J),r(J,sr),g($e,sr,null),r(D,en),r(D,ir),r(ir,rn),p(e,Mr,d),p(e,K,d),r(K,tn),r(K,lr),r(lr,nn),r(K,on),p(e,qr,d),p(e,M,d),r(M,Q),r(Q,fr),g(be,fr,null),r(M,an),r(M,mr),r(mr,sn),p(e,Ar,d),p(e,x,d),g(ye,x,null),r(x,ln),r(x,Y),g(we,Y,null),r(Y,fn),r(Y,dr),r(dr,mn),r(x,dn),r(x,w),g(Ee,w,null),r(w,pn),r(w,pr),r(pr,cn),r(w,hn),r(w,cr),r(cr,gn),r(w,un),r(w,q),r(q,ke),r(ke,_n),r(ke,hr),r(hr,vn),r(ke,xn),r(q,$n),r(q,gr),r(gr,bn),r(q,yn),r(q,ur),r(ur,wn),r(x,En),r(x,Z),g(Oe,Z,null),r(Z,kn),r(Z,_r),r(_r,On),r(x,Cn),r(x,ee),g(Ce,ee,null),r(ee,Pn),r(ee,vr),r(vr,Nn),r(x,Tn),r(x,re),g(Pe,re,null),r(re,Fn),r(re,xr),r(xr,Sn),r(x,Dn),r(x,te),g(Ne,te,null),r(te,Mn),r(te,$r),r($r,qn),Ir=!0},p:Io,i(e){Ir||(u(oe.$$.fragment,e),u(ae.$$.fragment,e),u(se.$$.fragment,e),u(ie.$$.fragment,e),u(le.$$.fragment,e),u(fe.$$.fragment,e),u(me.$$.fragment,e),u(de.$$.fragment,e),u(pe.$$.fragment,e),u(ce.$$.fragment,e),u(he.$$.fragment,e),u(ge.$$.fragment,e),u(_e.$$.fragment,e),u(xe.$$.fragment,e),u($e.$$.fragment,e),u(be.$$.fragment,e),u(ye.$$.fragment,e),u(we.$$.fragment,e),u(Ee.$$.fragment,e),u(Oe.$$.fragment,e),u(Ce.$$.fragment,e),u(Pe.$$.fragment,e),u(Ne.$$.fragment,e),Ir=!0)},o(e){_(oe.$$.fragment,e),_(ae.$$.fragment,e),_(se.$$.fragment,e),_(ie.$$.fragment,e),_(le.$$.fragment,e),_(fe.$$.fragment,e),_(me.$$.fragment,e),_(de.$$.fragment,e),_(pe.$$.fragment,e),_(ce.$$.fragment,e),_(he.$$.fragment,e),_(ge.$$.fragment,e),_(_e.$$.fragment,e),_(xe.$$.fragment,e),_($e.$$.fragment,e),_(be.$$.fragment,e),_(ye.$$.fragment,e),_(we.$$.fragment,e),_(Ee.$$.fragment,e),_(Oe.$$.fragment,e),_(Ce.$$.fragment,e),_(Pe.$$.fragment,e),_(Ne.$$.fragment,e),Ir=!1},d(e){t(C),e&&t(br),e&&t(P),v(oe),e&&t(yr),e&&t(I),e&&t(wr),e&&t(L),e&&t(Er),e&&t(N),v(ae),e&&t(kr),e&&t(Se),e&&t(Or),e&&t(O),e&&t(Cr),e&&t(T),v(se),e&&t(Pr),e&&t($),v(ie),v(le),v(fe),v(me),v(de),e&&t(Nr),e&&t(F),v(pe),e&&t(Tr),e&&t(k),v(ce),v(he),v(ge),e&&t(Fr),e&&t(S),v(_e),e&&t(Sr),e&&t(ve),v(xe),e&&t(Dr),e&&t(D),v($e),e&&t(Mr),e&&t(K),e&&t(qr),e&&t(M),v(be),e&&t(Ar),e&&t(x),v(ye),v(we),v(Ee),v(Oe),v(Ce),v(Pe),v(Ne)}}}const 
Xo={local:"exporting-transformers-models-to-onnx",sections:[{local:"onnx-configurations",sections:[{local:"transformers.onnx.OnnxConfig",title:"OnnxConfig"},{local:"transformers.onnx.OnnxConfigWithPast",title:"OnnxConfigWithPast"},{local:"transformers.onnx.OnnxSeq2SeqConfigWithPast",title:"OnnxSeq2SeqConfigWithPast"}],title:"ONNX Configurations"},{local:"onnx-features",sections:[{local:"transformers.onnx.FeaturesManager",title:"FeaturesManager"}],title:"ONNX Features"}],title:"Exporting \u{1F917} Transformers models to ONNX"};function Vo(Wn){return Lo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Uo extends Do{constructor(C){super();Mo(this,C,Vo,Wo,qo,{})}}export{Uo as default,Xo as metadata};
17
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/optimizer_schedules.mdx-hf-doc-builder.js
import{S as Fi,i as Oi,s as Ri,e as a,k as l,w as g,t as s,M as ji,c as n,d as r,m,a as o,x as _,h as i,b as c,N as Ca,G as t,g as h,y as w,q as v,o as b,B as y,v as qi,L as Oa}from"../../chunks/vendor-hf-doc-builder.js";import{D as L}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Ra}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as ne}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Fa}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Ui(C){let f,T,x,u,z;return u=new Ra({props:{code:"Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">False</span>, relative_step=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, lr=<span class="hljs-number">1e-3</span>)'}}),{c(){f=a("p"),T=s("Example:"),x=l(),g(u.$$.fragment)},l(p){f=n(p,"P",{});var $=o(f);T=i($,"Example:"),$.forEach(r),x=m(p),_(u.$$.fragment,p)},m(p,$){h(p,f,$),t(f,T),h(p,x,$),w(u,p,$),z=!0},p:Oa,i(p){z||(v(u.$$.fragment,p),z=!0)},o(p){b(u.$$.fragment,p),z=!1},d(p){p&&r(f),p&&r(x),y(u,p)}}}function Gi(C){let f,T,x,u,z;return u=new Ra({props:{code:"Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>)'}}),{c(){f=a("p"),T=s("Others reported the following combination to work well:"),x=l(),g(u.$$.fragment)},l(p){f=n(p,"P",{});var $=o(f);T=i($,"Others reported the following combination to work well:"),$.forEach(r),x=m(p),_(u.$$.fragment,p)},m(p,$){h(p,f,$),t(f,T),h(p,x,$),w(u,p,$),z=!0},p:Oa,i(p){z||(v(u.$$.fragment,p),z=!0)},o(p){b(u.$$.fragment,p),z=!1},d(p){p&&r(f),p&&r(x),y(u,p)}}}function Vi(C){let f,T,x,u,z;return u=new Ra({props:{code:`from transformers.optimization import Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))`,highlighted:`<span class="hljs-keyword">from</span> transformers.optimization <span class="hljs-keyword">import</span> Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))`}}),{c(){f=a("p"),T=s("scheduler as following:"),x=l(),g(u.$$.fragment)},l(p){f=n(p,"P",{});var $=o(f);T=i($,"scheduler as following:"),$.forEach(r),x=m(p),_(u.$$.fragment,p)},m(p,$){h(p,f,$),t(f,T),h(p,x,$),w(u,p,$),z=!0},p:Oa,i(p){z||(v(u.$$.fragment,p),z=!0)},o(p){b(u.$$.fragment,p),z=!1},d(p){p&&r(f),p&&r(x),y(u,p)}}}function Mi(C){let f,T,x,u,z;return u=new Ra({props:{code:`# replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )`,highlighted:`<span class="hljs-comment"># replace AdamW with Adafactor</span> optimizer = Adafactor( 
model.parameters(), lr=<span class="hljs-number">1e-3</span>, eps=(<span class="hljs-number">1e-30</span>, <span class="hljs-number">1e-3</span>), clip_threshold=<span class="hljs-number">1.0</span>, decay_rate=-<span class="hljs-number">0.8</span>, beta1=<span class="hljs-literal">None</span>, weight_decay=<span class="hljs-number">0.0</span>, relative_step=<span class="hljs-literal">False</span>, scale_parameter=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, )`}}),{c(){f=a("p"),T=s("Usage:"),x=l(),g(u.$$.fragment)},l(p){f=n(p,"P",{});var $=o(f);T=i($,"Usage:"),$.forEach(r),x=m(p),_(u.$$.fragment,p)},m(p,$){h(p,f,$),t(f,T),h(p,x,$),w(u,p,$),z=!0},p:Oa,i(p){z||(v(u.$$.fragment,p),z=!0)},o(p){b(u.$$.fragment,p),z=!1},d(p){p&&r(f),p&&r(x),y(u,p)}}}function Hi(C){let f,T,x,u,z,p,$,zt,ja,kr,oe,qa,Et,Ua,Ga,Sr,F,Tt,Va,Ma,ze,Ha,Dt,Ba,Ja,Ka,Lt,Qa,Nr,R,se,Pt,Ee,Xa,Wt,Ya,Ir,k,Te,Za,De,en,Le,tn,rn,an,ie,Pe,nn,kt,on,Cr,j,le,St,We,sn,Nt,ln,Fr,A,ke,mn,_t,cn,Se,pn,dn,D,hn,It,un,fn,Ne,gn,_n,Ct,wn,vn,Ft,bn,yn,Ot,$n,xn,Rt,An,zn,jt,En,Tn,Dn,qt,Ln,Pn,Ie,Wn,Ce,kn,Sn,Nn,S,Fe,Ut,In,Cn,Oe,Gt,Fn,On,Re,Rn,je,jn,qn,Un,Vt,Mt,Gn,Vn,Ht,Bt,Mn,Hn,Jt,Kt,Bn,Jn,me,Kn,ce,Qn,O,Xn,Qt,Yn,Zn,wt,eo,to,Xt,ro,ao,pe,no,de,oo,he,qe,so,Yt,io,Or,q,ue,Zt,Ue,lo,er,mo,Rr,W,Ge,co,U,po,tr,ho,uo,Ve,fo,go,_o,rr,wo,vo,fe,Me,bo,ar,yo,jr,G,He,$o,nr,xo,qr,V,ge,or,Be,Ao,sr,zo,Ur,M,_e,ir,Je,Eo,lr,To,Gr,H,Ke,Do,mr,Lo,Vr,B,Qe,Po,cr,Wo,Mr,J,Xe,ko,pr,So,Hr,K,Ye,No,dr,Io,Br,Ze,fs,Jr,Q,et,Co,hr,Fo,Kr,tt,gs,Qr,X,rt,Oo,ur,Ro,Xr,at,_s,Yr,Y,nt,jo,fr,qo,Zr,ot,ws,ea,N,st,Uo,it,Go,gr,Vo,Mo,Ho,we,Bo,_r,Jo,Ko,lt,Qo,ta,Z,ve,wr,mt,Xo,vr,Yo,ra,ee,ct,Zo,br,es,aa,te,be,yr,pt,ts,$r,rs,na,re,ye,xr,dt,as,Ar,ns,oa,I,ht,os,ae,ss,zr,is,ls,Er,ms,cs,ps,$e,ut,ds,Tr,hs,sa;return p=new ne({}),Ee=new ne({}),Te=new L({props:{name:"class transformers.AdamW",anchor:"transformers.AdamW",parameters:[{name:"params",val:": typing.Iterable[torch.nn.parameter.Parameter]"},{name:"lr",val:": float = 0.001"},{name:"betas",val:": typing.Tuple[float, float] = (0.9, 0.999)"},{name:"eps",val:": float = 1e-06"},{name:"weight_decay",val:": float = 0.0"},{name:"correct_bias",val:": bool = True"},{name:"no_deprecation_warning",val:": bool = False"}],parametersDescription:[{anchor:"transformers.AdamW.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.AdamW.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use.`,name:"lr"},{anchor:"transformers.AdamW.betas",description:`<strong>betas</strong> (<code>Tuple[float,float]</code>, <em>optional</em>, defaults to (0.9, 0.999)) &#x2014; Adam&#x2019;s betas parameters (b1, b2).`,name:"betas"},{anchor:"transformers.AdamW.eps",description:`<strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; Adam&#x2019;s epsilon for numerical stability.`,name:"eps"},{anchor:"transformers.AdamW.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Decoupled weight decay to apply.`,name:"weight_decay"},{anchor:"transformers.AdamW.correct_bias",description:`<strong>correct_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to correct bias in Adam (for instance, in Bert TF repository they use 
<code>False</code>).`,name:"correct_bias"},{anchor:"transformers.AdamW.no_deprecation_warning",description:`<strong>no_deprecation_warning</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; A flag used to disable the deprecation warning (set to <code>True</code> to disable the warning).`,name:"no_deprecation_warning"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L273"}}),Pe=new L({props:{name:"step",anchor:"transformers.AdamW.step",parameters:[{name:"closure",val:": typing.Callable = None"}],parametersDescription:[{anchor:"transformers.AdamW.step.closure",description:"<strong>closure</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A closure that reevaluates the model and returns the loss.",name:"closure"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L324"}}),We=new ne({}),ke=new L({props:{name:"class transformers.Adafactor",anchor:"transformers.Adafactor",parameters:[{name:"params",val:""},{name:"lr",val:" = None"},{name:"eps",val:" = (1e-30, 0.001)"},{name:"clip_threshold",val:" = 1.0"},{name:"decay_rate",val:" = -0.8"},{name:"beta1",val:" = None"},{name:"weight_decay",val:" = 0.0"},{name:"scale_parameter",val:" = True"},{name:"relative_step",val:" = True"},{name:"warmup_init",val:" = False"}],parametersDescription:[{anchor:"transformers.Adafactor.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.Adafactor.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>) &#x2014; The external learning rate.`,name:"lr"},{anchor:"transformers.Adafactor.eps",description:`<strong>eps</strong> (<code>Tuple[float, float]</code>, <em>optional</em>, defaults to (1e-30, 1e-3)) &#x2014; Regularization constants for square gradient and parameter scale respectively`,name:"eps"},{anchor:"transformers.Adafactor.clip_threshold",description:`<strong>clip_threshold</strong> (<code>float</code>, <em>optional</em>, defaults 1.0) &#x2014; Threshold of root mean square of final gradient update`,name:"clip_threshold"},{anchor:"transformers.Adafactor.decay_rate",description:`<strong>decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to -0.8) &#x2014; Coefficient used to compute running averages of square`,name:"decay_rate"},{anchor:"transformers.Adafactor.beta1",description:`<strong>beta1</strong> (<code>float</code>, <em>optional</em>) &#x2014; Coefficient used for computing running averages of gradient`,name:"beta1"},{anchor:"transformers.Adafactor.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Weight decay (L2 penalty)`,name:"weight_decay"},{anchor:"transformers.Adafactor.scale_parameter",description:`<strong>scale_parameter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, learning rate is scaled by root mean square`,name:"scale_parameter"},{anchor:"transformers.Adafactor.relative_step",description:`<strong>relative_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, time-dependent learning rate is computed instead of external learning rate`,name:"relative_step"},{anchor:"transformers.Adafactor.warmup_init",description:`<strong>warmup_init</strong> (<code>bool</code>, <em>optional</em>, 
defaults to <code>False</code>) &#x2014; Time-dependent learning rate computation depends on whether warm-up initialization is being used`,name:"warmup_init"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L386"}}),me=new Fa({props:{anchor:"transformers.Adafactor.example",$$slots:{default:[Ui]},$$scope:{ctx:C}}}),ce=new Fa({props:{anchor:"transformers.Adafactor.example-2",$$slots:{default:[Gi]},$$scope:{ctx:C}}}),pe=new Fa({props:{anchor:"transformers.Adafactor.example-3",$$slots:{default:[Vi]},$$scope:{ctx:C}}}),de=new Fa({props:{anchor:"transformers.Adafactor.example-4",$$slots:{default:[Mi]},$$scope:{ctx:C}}}),qe=new L({props:{name:"step",anchor:"transformers.Adafactor.step",parameters:[{name:"closure",val:" = None"}],parametersDescription:[{anchor:"transformers.Adafactor.step.closure",description:`<strong>closure</strong> (callable, optional) &#x2014; A closure that reevaluates the model and returns the loss.`,name:"closure"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L532"}}),Ue=new ne({}),Ge=new L({props:{name:"class transformers.AdamWeightDecay",anchor:"transformers.AdamWeightDecay",parameters:[{name:"learning_rate",val:": typing.Union[float, keras.optimizers.schedules.learning_rate_schedule.LearningRateSchedule] = 0.001"},{name:"beta_1",val:": float = 0.9"},{name:"beta_2",val:": float = 0.999"},{name:"epsilon",val:": float = 1e-07"},{name:"amsgrad",val:": bool = False"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"exclude_from_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"name",val:": str = 'AdamWeightDecay'"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.AdamWeightDecay.learning_rate",description:`<strong>learning_rate</strong> (<code>Union[float, tf.keras.optimizers.schedules.LearningRateSchedule]</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use or a schedule.`,name:"learning_rate"},{anchor:"transformers.AdamWeightDecay.beta_1",description:`<strong>beta_1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.`,name:"beta_1"},{anchor:"transformers.AdamWeightDecay.beta_2",description:`<strong>beta_2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates.`,name:"beta_2"},{anchor:"transformers.AdamWeightDecay.epsilon",description:`<strong>epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon parameter in Adam, which is a small constant for numerical stability.`,name:"epsilon"},{anchor:"transformers.AdamWeightDecay.amsgrad",description:`<strong>amsgrad</strong> (<code>bool</code>, <em>optional</em>, default to <code>False</code>) &#x2014; Whether to apply AMSGrad variant of this algorithm or not, see <a href="https://arxiv.org/abs/1904.09237" rel="nofollow">On the Convergence of Adam and Beyond</a>.`,name:"amsgrad"},{anchor:"transformers.AdamWeightDecay.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to 
apply.`,name:"weight_decay_rate"},{anchor:"transformers.AdamWeightDecay.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters by default (unless they are in <code>exclude_from_weight_decay</code>).`,name:"include_in_weight_decay"},{anchor:"transformers.AdamWeightDecay.exclude_from_weight_decay",description:`<strong>exclude_from_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to exclude from applying weight decay to. If a <code>include_in_weight_decay</code> is passed, the names in it will supersede this list.`,name:"exclude_from_weight_decay"},{anchor:"transformers.AdamWeightDecay.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;AdamWeightDecay&#x2019;) &#x2014; Optional name for the operations created when applying gradients. kwargs &#x2014; Keyword arguments. Allowed to be {<code>clipnorm</code>, <code>clipvalue</code>, <code>lr</code>, <code>decay</code>}. <code>clipnorm</code> is clip gradients by norm; <code>clipvalue</code> is clip gradients by value, <code>decay</code> is included for backward compatibility to allow time inverse decay of learning rate. <code>lr</code> is included for backward compatibility, recommended to use <code>learning_rate</code> instead.`,name:"name"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L166"}}),Me=new L({props:{name:"from_config",anchor:"transformers.AdamWeightDecay.from_config",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L223"}}),He=new L({props:{name:"transformers.create_optimizer",anchor:"transformers.create_optimizer",parameters:[{name:"init_lr",val:": float"},{name:"num_train_steps",val:": int"},{name:"num_warmup_steps",val:": int"},{name:"min_lr_ratio",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"adam_clipnorm",val:": typing.Optional[float] = None"},{name:"adam_global_clipnorm",val:": typing.Optional[float] = None"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"power",val:": float = 1.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"}],parametersDescription:[{anchor:"transformers.create_optimizer.init_lr",description:`<strong>init_lr</strong> (<code>float</code>) &#x2014; The desired learning rate at the end of the warmup phase.`,name:"init_lr"},{anchor:"transformers.create_optimizer.num_train_steps",description:`<strong>num_train_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_train_steps"},{anchor:"transformers.create_optimizer.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of warmup steps.`,name:"num_warmup_steps"},{anchor:"transformers.create_optimizer.min_lr_ratio",description:`<strong>min_lr_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The final learning rate at the end of the linear decay will be <code>init_lr * min_lr_ratio</code>.`,name:"min_lr_ratio"},{anchor:"transformers.create_optimizer.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, 
<em>optional</em>, defaults to 0.9) &#x2014; The beta1 to use in Adam.`,name:"adam_beta1"},{anchor:"transformers.create_optimizer.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 to use in Adam.`,name:"adam_beta2"},{anchor:"transformers.create_optimizer.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon to use in Adam. adam_clipnorm &#x2014; (<code>float</code>, <em>optional</em>, defaults to <code>None</code>): If not <code>None</code>, clip the gradient norm for each weight tensor to this value. adam_global_clipnorm &#x2014; (<code>float</code>, <em>optional</em>, defaults to <code>None</code>) If not <code>None</code>, clip gradient norm to this value. When using this argument, the norm is computed over all weight tensors, as if they were concatenated into a single vector.`,name:"adam_epsilon"},{anchor:"transformers.create_optimizer.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to use.`,name:"weight_decay_rate"},{anchor:"transformers.create_optimizer.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The power to use for PolynomialDecay.`,name:"power"},{anchor:"transformers.create_optimizer.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters except bias and layer norm parameters.`,name:"include_in_weight_decay"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L82"}}),Be=new ne({}),Je=new ne({}),Ke=new L({props:{name:"class transformers.SchedulerType",anchor:"transformers.SchedulerType",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L355"}}),Qe=new L({props:{name:"transformers.get_scheduler",anchor:"transformers.get_scheduler",parameters:[{name:"name",val:": typing.Union[str, transformers.trainer_utils.SchedulerType]"},{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": typing.Optional[int] = None"},{name:"num_training_steps",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.get_scheduler.name",description:`<strong>name</strong> (<code>str</code> or <code>SchedulerType</code>) &#x2014; The name of the scheduler to use.`,name:"name"},{anchor:"transformers.get_scheduler.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer that will be used during training.`,name:"optimizer"},{anchor:"transformers.get_scheduler.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of warmup steps to do. 
This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_warmup_steps"},{anchor:"transformers.get_scheduler.num_training_steps",description:`<strong>num_training_steps</strong> (\`int&#x201C;, <em>optional</em>) &#x2014; The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_training_steps"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L233"}}),Xe=new L({props:{name:"transformers.get_constant_schedule",anchor:"transformers.get_constant_schedule",parameters:[{name:"optimizer",val:": Optimizer"},{name:"last_epoch",val:": int = -1"}],parametersDescription:[{anchor:"transformers.get_constant_schedule.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L34",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Ye=new L({props:{name:"transformers.get_constant_schedule_with_warmup",anchor:"transformers.get_constant_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"last_epoch",val:": int = -1"}],parametersDescription:[{anchor:"transformers.get_constant_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_constant_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L50",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),et=new L({props:{name:"transformers.get_cosine_schedule_with_warmup",anchor:"transformers.get_cosine_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": float = 0.5"},{name:"last_epoch",val:": int = -1"}],parametersDescription:[{anchor:"transformers.get_cosine_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup 
phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).`,name:"num_cycles"},{anchor:"transformers.get_cosine_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L104",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),rt=new L({props:{name:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": int = 1"},{name:"last_epoch",val:": int = -1"}],parametersDescription:[{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of hard restarts to use.`,name:"num_cycles"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L138",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),nt=new L({props:{name:"transformers.get_linear_schedule_with_warmup",anchor:"transformers.get_linear_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"last_epoch",val:" = -1"}],parametersDescription:[{anchor:"transformers.get_linear_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_linear_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup 
phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L75",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),st=new L({props:{name:"transformers.get_polynomial_decay_schedule_with_warmup",anchor:"transformers.get_polynomial_decay_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"lr_end",val:" = 1e-07"},{name:"power",val:" = 1.0"},{name:"last_epoch",val:" = -1"}],parametersDescription:[{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.lr_end",description:`<strong>lr_end</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The end LR.`,name:"lr_end"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Power factor.`,name:"power"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L173",returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),mt=new ne({}),ct=new L({props:{name:"class transformers.WarmUp",anchor:"transformers.WarmUp",parameters:[{name:"initial_learning_rate",val:": float"},{name:"decay_schedule_fn",val:": typing.Callable"},{name:"warmup_steps",val:": int"},{name:"power",val:": float = 1.0"},{name:"name",val:": str = None"}],parametersDescription:[{anchor:"transformers.WarmUp.initial_learning_rate",description:`<strong>initial_learning_rate</strong> (<code>float</code>) &#x2014; The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end of the warmup).`,name:"initial_learning_rate"},{anchor:"transformers.WarmUp.decay_schedule_fn",description:`<strong>decay_schedule_fn</strong> (<code>Callable</code>) &#x2014; The schedule function to apply after the warmup for the rest of 
training.`,name:"decay_schedule_fn"},{anchor:"transformers.WarmUp.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup part of training.`,name:"warmup_steps"},{anchor:"transformers.WarmUp.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The power to use for the polynomial warmup (defaults is a linear warmup).`,name:"power"},{anchor:"transformers.WarmUp.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Optional name prefix for the returned tensors during the schedule.`,name:"name"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L24"}}),pt=new ne({}),dt=new ne({}),ht=new L({props:{name:"class transformers.GradientAccumulator",anchor:"transformers.GradientAccumulator",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L296"}}),ut=new L({props:{name:"reset",anchor:"transformers.GradientAccumulator.reset",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L358"}}),{c(){f=a("meta"),T=l(),x=a("h1"),u=a("a"),z=a("span"),g(p.$$.fragment),$=l(),zt=a("span"),ja=s("Optimization"),kr=l(),oe=a("p"),qa=s("The "),Et=a("code"),Ua=s(".optimization"),Ga=s(" module provides:"),Sr=l(),F=a("ul"),Tt=a("li"),Va=s("an optimizer with weight decay fixed that can be used to fine-tuned models, and"),Ma=l(),ze=a("li"),Ha=s("several schedules in the form of schedule objects that inherit from "),Dt=a("code"),Ba=s("_LRSchedule"),Ja=s(":"),Ka=l(),Lt=a("li"),Qa=s("a gradient accumulation class to accumulate the gradients of multiple batches"),Nr=l(),R=a("h2"),se=a("a"),Pt=a("span"),g(Ee.$$.fragment),Xa=l(),Wt=a("span"),Ya=s("AdamW (PyTorch)"),Ir=l(),k=a("div"),g(Te.$$.fragment),Za=l(),De=a("p"),en=s("Implements Adam algorithm with weight decay fix as introduced in "),Le=a("a"),tn=s(`Decoupled Weight Decay Regularization`),rn=s("."),an=l(),ie=a("div"),g(Pe.$$.fragment),nn=l(),kt=a("p"),on=s("Performs a single optimization step."),Cr=l(),j=a("h2"),le=a("a"),St=a("span"),g(We.$$.fragment),sn=l(),Nt=a("span"),ln=s("AdaFactor (PyTorch)"),Fr=l(),A=a("div"),g(ke.$$.fragment),mn=l(),_t=a("p"),cn=s(`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),Se=a("a"),pn=s("https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),dn=l(),D=a("p"),hn=s("Paper: "),It=a("em"),un=s("Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),fn=l(),Ne=a("a"),gn=s("https://arxiv.org/abs/1804.04235"),_n=s(` Note that this optimizer internally adjusts the learning rate depending on the `),Ct=a("code"),wn=s("scale_parameter"),vn=s(", "),Ft=a("code"),bn=s("relative_step"),yn=s(` and `),Ot=a("code"),$n=s("warmup_init"),xn=s(" options. 
To use a manual (external) learning rate schedule you should set "),Rt=a("code"),An=s("scale_parameter=False"),zn=s(` and `),jt=a("code"),En=s("relative_step=False"),Tn=s("."),Dn=l(),qt=a("p"),Ln=s("This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Pn=l(),Ie=a("p"),Wn=s("Recommended T5 finetuning settings ("),Ce=a("a"),kn=s("https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),Sn=s("):"),Nn=l(),S=a("ul"),Fe=a("li"),Ut=a("p"),In=s("Training without LR warmup or clip_threshold is not recommended."),Cn=l(),Oe=a("ul"),Gt=a("li"),Fn=s("use scheduled LR warm-up to fixed LR"),On=l(),Re=a("li"),Rn=s("use clip_threshold=1.0 ("),je=a("a"),jn=s("https://arxiv.org/abs/1804.04235"),qn=s(")"),Un=l(),Vt=a("li"),Mt=a("p"),Gn=s("Disable relative updates"),Vn=l(),Ht=a("li"),Bt=a("p"),Mn=s("Use scale_parameter=False"),Hn=l(),Jt=a("li"),Kt=a("p"),Bn=s("Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),Jn=l(),g(me.$$.fragment),Kn=l(),g(ce.$$.fragment),Qn=l(),O=a("p"),Xn=s("When using "),Qt=a("code"),Yn=s("lr=None"),Zn=s(" with "),wt=a("a"),eo=s("Trainer"),to=s(" you will most likely need to use "),Xt=a("code"),ro=s("AdafactorSchedule"),ao=l(),g(pe.$$.fragment),no=l(),g(de.$$.fragment),oo=l(),he=a("div"),g(qe.$$.fragment),so=l(),Yt=a("p"),io=s("Performs a single optimization step"),Or=l(),q=a("h2"),ue=a("a"),Zt=a("span"),g(Ue.$$.fragment),lo=l(),er=a("span"),mo=s("AdamWeightDecay (TensorFlow)"),Rr=l(),W=a("div"),g(Ge.$$.fragment),co=l(),U=a("p"),po=s(`Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is `),tr=a("em"),ho=s("not"),uo=s(` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),Ve=a("a"),fo=s(`Decoupled Weight Decay Regularization`),go=s("."),_o=l(),rr=a("p"),wo=s(`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. 
This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),vo=l(),fe=a("div"),g(Me.$$.fragment),bo=l(),ar=a("p"),yo=s("Creates an optimizer from its config with WarmUp custom object."),jr=l(),G=a("div"),g(He.$$.fragment),$o=l(),nr=a("p"),xo=s("Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),qr=l(),V=a("h2"),ge=a("a"),or=a("span"),g(Be.$$.fragment),Ao=l(),sr=a("span"),zo=s("Schedules"),Ur=l(),M=a("h3"),_e=a("a"),ir=a("span"),g(Je.$$.fragment),Eo=l(),lr=a("span"),To=s("Learning Rate Schedules (Pytorch)"),Gr=l(),H=a("div"),g(Ke.$$.fragment),Do=l(),mr=a("p"),Lo=s("An enumeration."),Vr=l(),B=a("div"),g(Qe.$$.fragment),Po=l(),cr=a("p"),Wo=s("Unified API to get any scheduler from its name."),Mr=l(),J=a("div"),g(Xe.$$.fragment),ko=l(),pr=a("p"),So=s("Create a schedule with a constant learning rate, using the learning rate set in optimizer."),Hr=l(),K=a("div"),g(Ye.$$.fragment),No=l(),dr=a("p"),Io=s(`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.`),Br=l(),Ze=a("img"),Jr=l(),Q=a("div"),g(et.$$.fragment),Co=l(),hr=a("p"),Fo=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Kr=l(),tt=a("img"),Qr=l(),X=a("div"),g(rt.$$.fragment),Oo=l(),ur=a("p"),Ro=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Xr=l(),at=a("img"),Yr=l(),Y=a("div"),g(nt.$$.fragment),jo=l(),fr=a("p"),qo=s(`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Zr=l(),ot=a("img"),ea=l(),N=a("div"),g(st.$$.fragment),Uo=l(),it=a("p"),Go=s(`Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `),gr=a("em"),Vo=s("lr_end"),Mo=s(`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Ho=l(),we=a("p"),Bo=s("Note: "),_r=a("em"),Jo=s("power"),Ko=s(` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at `),lt=a("a"),Qo=s("https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),ta=l(),Z=a("h3"),ve=a("a"),wr=a("span"),g(mt.$$.fragment),Xo=l(),vr=a("span"),Yo=s("Warmup (TensorFlow)"),ra=l(),ee=a("div"),g(ct.$$.fragment),Zo=l(),br=a("p"),es=s("Applies a warmup schedule on a given learning rate decay schedule."),aa=l(),te=a("h2"),be=a("a"),yr=a("span"),g(pt.$$.fragment),ts=l(),$r=a("span"),rs=s("Gradient Strategies"),na=l(),re=a("h3"),ye=a("a"),xr=a("span"),g(dt.$$.fragment),as=l(),Ar=a("span"),ns=s("GradientAccumulator (TensorFlow)"),oa=l(),I=a("div"),g(ht.$$.fragment),os=l(),ae=a("p"),ss=s(`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. 
Gradients will be accumulated locally on each replica and without synchronization. Users should then call `),zr=a("code"),is=s(".gradients"),ls=s(", scale the gradients if required, and pass the result to "),Er=a("code"),ms=s("apply_gradients"),cs=s("."),ps=l(),$e=a("div"),g(ut.$$.fragment),ds=l(),Tr=a("p"),hs=s("Resets the accumulated gradients on the current replica."),this.h()},l(e){const d=ji('[data-svelte="svelte-1phssyn"]',document.head);f=n(d,"META",{name:!0,content:!0}),d.forEach(r),T=m(e),x=n(e,"H1",{class:!0});var ft=o(x);u=n(ft,"A",{id:!0,class:!0,href:!0});var Dr=o(u);z=n(Dr,"SPAN",{});var Lr=o(z);_(p.$$.fragment,Lr),Lr.forEach(r),Dr.forEach(r),$=m(ft),zt=n(ft,"SPAN",{});var Pr=o(zt);ja=i(Pr,"Optimization"),Pr.forEach(r),ft.forEach(r),kr=m(e),oe=n(e,"P",{});var ia=o(oe);qa=i(ia,"The "),Et=n(ia,"CODE",{});var vs=o(Et);Ua=i(vs,".optimization"),vs.forEach(r),Ga=i(ia," module provides:"),ia.forEach(r),Sr=m(e),F=n(e,"UL",{});var vt=o(F);Tt=n(vt,"LI",{});var bs=o(Tt);Va=i(bs,"an optimizer with weight decay fixed that can be used to fine-tuned models, and"),bs.forEach(r),Ma=m(vt),ze=n(vt,"LI",{});var la=o(ze);Ha=i(la,"several schedules in the form of schedule objects that inherit from "),Dt=n(la,"CODE",{});var ys=o(Dt);Ba=i(ys,"_LRSchedule"),ys.forEach(r),Ja=i(la,":"),la.forEach(r),Ka=m(vt),Lt=n(vt,"LI",{});var $s=o(Lt);Qa=i($s,"a gradient accumulation class to accumulate the gradients of multiple batches"),$s.forEach(r),vt.forEach(r),Nr=m(e),R=n(e,"H2",{class:!0});var ma=o(R);se=n(ma,"A",{id:!0,class:!0,href:!0});var xs=o(se);Pt=n(xs,"SPAN",{});var As=o(Pt);_(Ee.$$.fragment,As),As.forEach(r),xs.forEach(r),Xa=m(ma),Wt=n(ma,"SPAN",{});var zs=o(Wt);Ya=i(zs,"AdamW (PyTorch)"),zs.forEach(r),ma.forEach(r),Ir=m(e),k=n(e,"DIV",{class:!0});var bt=o(k);_(Te.$$.fragment,bt),Za=m(bt),De=n(bt,"P",{});var ca=o(De);en=i(ca,"Implements Adam algorithm with weight decay fix as introduced in "),Le=n(ca,"A",{href:!0,rel:!0});var Es=o(Le);tn=i(Es,`Decoupled Weight Decay Regularization`),Es.forEach(r),rn=i(ca,"."),ca.forEach(r),an=m(bt),ie=n(bt,"DIV",{class:!0});var pa=o(ie);_(Pe.$$.fragment,pa),nn=m(pa),kt=n(pa,"P",{});var Ts=o(kt);on=i(Ts,"Performs a single optimization step."),Ts.forEach(r),pa.forEach(r),bt.forEach(r),Cr=m(e),j=n(e,"H2",{class:!0});var da=o(j);le=n(da,"A",{id:!0,class:!0,href:!0});var Ds=o(le);St=n(Ds,"SPAN",{});var Ls=o(St);_(We.$$.fragment,Ls),Ls.forEach(r),Ds.forEach(r),sn=m(da),Nt=n(da,"SPAN",{});var Ps=o(Nt);ln=i(Ps,"AdaFactor (PyTorch)"),Ps.forEach(r),da.forEach(r),Fr=m(e),A=n(e,"DIV",{class:!0});var E=o(A);_(ke.$$.fragment,E),mn=m(E),_t=n(E,"P",{});var us=o(_t);cn=i(us,`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),Se=n(us,"A",{href:!0,rel:!0});var Ws=o(Se);pn=i(Ws,"https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),Ws.forEach(r),us.forEach(r),dn=m(E),D=n(E,"P",{});var P=o(D);hn=i(P,"Paper: "),It=n(P,"EM",{});var ks=o(It);un=i(ks,"Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),ks.forEach(r),fn=m(P),Ne=n(P,"A",{href:!0,rel:!0});var Ss=o(Ne);gn=i(Ss,"https://arxiv.org/abs/1804.04235"),Ss.forEach(r),_n=i(P,` Note that this optimizer internally adjusts the learning rate depending on the `),Ct=n(P,"CODE",{});var Ns=o(Ct);wn=i(Ns,"scale_parameter"),Ns.forEach(r),vn=i(P,", "),Ft=n(P,"CODE",{});var Is=o(Ft);bn=i(Is,"relative_step"),Is.forEach(r),yn=i(P,` and `),Ot=n(P,"CODE",{});var Cs=o(Ot);$n=i(Cs,"warmup_init"),Cs.forEach(r),xn=i(P," options. 
To use a manual (external) learning rate schedule you should set "),Rt=n(P,"CODE",{});var Fs=o(Rt);An=i(Fs,"scale_parameter=False"),Fs.forEach(r),zn=i(P,` and `),jt=n(P,"CODE",{});var Os=o(jt);En=i(Os,"relative_step=False"),Os.forEach(r),Tn=i(P,"."),P.forEach(r),Dn=m(E),qt=n(E,"P",{});var Rs=o(qt);Ln=i(Rs,"This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Rs.forEach(r),Pn=m(E),Ie=n(E,"P",{});var ha=o(Ie);Wn=i(ha,"Recommended T5 finetuning settings ("),Ce=n(ha,"A",{href:!0,rel:!0});var js=o(Ce);kn=i(js,"https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),js.forEach(r),Sn=i(ha,"):"),ha.forEach(r),Nn=m(E),S=n(E,"UL",{});var xe=o(S);Fe=n(xe,"LI",{});var ua=o(Fe);Ut=n(ua,"P",{});var qs=o(Ut);In=i(qs,"Training without LR warmup or clip_threshold is not recommended."),qs.forEach(r),Cn=m(ua),Oe=n(ua,"UL",{});var fa=o(Oe);Gt=n(fa,"LI",{});var Us=o(Gt);Fn=i(Us,"use scheduled LR warm-up to fixed LR"),Us.forEach(r),On=m(fa),Re=n(fa,"LI",{});var ga=o(Re);Rn=i(ga,"use clip_threshold=1.0 ("),je=n(ga,"A",{href:!0,rel:!0});var Gs=o(je);jn=i(Gs,"https://arxiv.org/abs/1804.04235"),Gs.forEach(r),qn=i(ga,")"),ga.forEach(r),fa.forEach(r),ua.forEach(r),Un=m(xe),Vt=n(xe,"LI",{});var Vs=o(Vt);Mt=n(Vs,"P",{});var Ms=o(Mt);Gn=i(Ms,"Disable relative updates"),Ms.forEach(r),Vs.forEach(r),Vn=m(xe),Ht=n(xe,"LI",{});var Hs=o(Ht);Bt=n(Hs,"P",{});var Bs=o(Bt);Mn=i(Bs,"Use scale_parameter=False"),Bs.forEach(r),Hs.forEach(r),Hn=m(xe),Jt=n(xe,"LI",{});var Js=o(Jt);Kt=n(Js,"P",{});var Ks=o(Kt);Bn=i(Ks,"Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),Ks.forEach(r),Js.forEach(r),xe.forEach(r),Jn=m(E),_(me.$$.fragment,E),Kn=m(E),_(ce.$$.fragment,E),Qn=m(E),O=n(E,"P",{});var gt=o(O);Xn=i(gt,"When using "),Qt=n(gt,"CODE",{});var Qs=o(Qt);Yn=i(Qs,"lr=None"),Qs.forEach(r),Zn=i(gt," with "),wt=n(gt,"A",{href:!0});var Xs=o(wt);eo=i(Xs,"Trainer"),Xs.forEach(r),to=i(gt," you will most likely need to use "),Xt=n(gt,"CODE",{});var Ys=o(Xt);ro=i(Ys,"AdafactorSchedule"),Ys.forEach(r),gt.forEach(r),ao=m(E),_(pe.$$.fragment,E),no=m(E),_(de.$$.fragment,E),oo=m(E),he=n(E,"DIV",{class:!0});var _a=o(he);_(qe.$$.fragment,_a),so=m(_a),Yt=n(_a,"P",{});var Zs=o(Yt);io=i(Zs,"Performs a single optimization step"),Zs.forEach(r),_a.forEach(r),E.forEach(r),Or=m(e),q=n(e,"H2",{class:!0});var wa=o(q);ue=n(wa,"A",{id:!0,class:!0,href:!0});var ei=o(ue);Zt=n(ei,"SPAN",{});var ti=o(Zt);_(Ue.$$.fragment,ti),ti.forEach(r),ei.forEach(r),lo=m(wa),er=n(wa,"SPAN",{});var ri=o(er);mo=i(ri,"AdamWeightDecay (TensorFlow)"),ri.forEach(r),wa.forEach(r),Rr=m(e),W=n(e,"DIV",{class:!0});var Ae=o(W);_(Ge.$$.fragment,Ae),co=m(Ae),U=n(Ae,"P",{});var yt=o(U);po=i(yt,`Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is `),tr=n(yt,"EM",{});var ai=o(tr);ho=i(ai,"not"),ai.forEach(r),uo=i(yt,` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),Ve=n(yt,"A",{href:!0,rel:!0});var ni=o(Ve);fo=i(ni,`Decoupled Weight Decay Regularization`),ni.forEach(r),go=i(yt,"."),yt.forEach(r),_o=m(Ae),rr=n(Ae,"P",{});var oi=o(rr);wo=i(oi,`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. 
This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),oi.forEach(r),vo=m(Ae),fe=n(Ae,"DIV",{class:!0});var va=o(fe);_(Me.$$.fragment,va),bo=m(va),ar=n(va,"P",{});var si=o(ar);yo=i(si,"Creates an optimizer from its config with WarmUp custom object."),si.forEach(r),va.forEach(r),Ae.forEach(r),jr=m(e),G=n(e,"DIV",{class:!0});var ba=o(G);_(He.$$.fragment,ba),$o=m(ba),nr=n(ba,"P",{});var ii=o(nr);xo=i(ii,"Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),ii.forEach(r),ba.forEach(r),qr=m(e),V=n(e,"H2",{class:!0});var ya=o(V);ge=n(ya,"A",{id:!0,class:!0,href:!0});var li=o(ge);or=n(li,"SPAN",{});var mi=o(or);_(Be.$$.fragment,mi),mi.forEach(r),li.forEach(r),Ao=m(ya),sr=n(ya,"SPAN",{});var ci=o(sr);zo=i(ci,"Schedules"),ci.forEach(r),ya.forEach(r),Ur=m(e),M=n(e,"H3",{class:!0});var $a=o(M);_e=n($a,"A",{id:!0,class:!0,href:!0});var pi=o(_e);ir=n(pi,"SPAN",{});var di=o(ir);_(Je.$$.fragment,di),di.forEach(r),pi.forEach(r),Eo=m($a),lr=n($a,"SPAN",{});var hi=o(lr);To=i(hi,"Learning Rate Schedules (Pytorch)"),hi.forEach(r),$a.forEach(r),Gr=m(e),H=n(e,"DIV",{class:!0});var xa=o(H);_(Ke.$$.fragment,xa),Do=m(xa),mr=n(xa,"P",{});var ui=o(mr);Lo=i(ui,"An enumeration."),ui.forEach(r),xa.forEach(r),Vr=m(e),B=n(e,"DIV",{class:!0});var Aa=o(B);_(Qe.$$.fragment,Aa),Po=m(Aa),cr=n(Aa,"P",{});var fi=o(cr);Wo=i(fi,"Unified API to get any scheduler from its name."),fi.forEach(r),Aa.forEach(r),Mr=m(e),J=n(e,"DIV",{class:!0});var za=o(J);_(Xe.$$.fragment,za),ko=m(za),pr=n(za,"P",{});var gi=o(pr);So=i(gi,"Create a schedule with a constant learning rate, using the learning rate set in optimizer."),gi.forEach(r),za.forEach(r),Hr=m(e),K=n(e,"DIV",{class:!0});var Ea=o(K);_(Ye.$$.fragment,Ea),No=m(Ea),dr=n(Ea,"P",{});var _i=o(dr);Io=i(_i,`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.`),_i.forEach(r),Ea.forEach(r),Br=m(e),Ze=n(e,"IMG",{alt:!0,src:!0}),Jr=m(e),Q=n(e,"DIV",{class:!0});var Ta=o(Q);_(et.$$.fragment,Ta),Co=m(Ta),hr=n(Ta,"P",{});var wi=o(hr);Fo=i(wi,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),wi.forEach(r),Ta.forEach(r),Kr=m(e),tt=n(e,"IMG",{alt:!0,src:!0}),Qr=m(e),X=n(e,"DIV",{class:!0});var Da=o(X);_(rt.$$.fragment,Da),Oo=m(Da),ur=n(Da,"P",{});var vi=o(ur);Ro=i(vi,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),vi.forEach(r),Da.forEach(r),Xr=m(e),at=n(e,"IMG",{alt:!0,src:!0}),Yr=m(e),Y=n(e,"DIV",{class:!0});var La=o(Y);_(nt.$$.fragment,La),jo=m(La),fr=n(La,"P",{});var bi=o(fr);qo=i(bi,`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),bi.forEach(r),La.forEach(r),Zr=m(e),ot=n(e,"IMG",{alt:!0,src:!0}),ea=m(e),N=n(e,"DIV",{class:!0});var $t=o(N);_(st.$$.fragment,$t),Uo=m($t),it=n($t,"P",{});var Pa=o(it);Go=i(Pa,`Create a schedule with a learning rate that decreases as a polynomial decay 
from the initial lr set in the optimizer to end lr defined by `),gr=n(Pa,"EM",{});var yi=o(gr);Vo=i(yi,"lr_end"),yi.forEach(r),Mo=i(Pa,`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Pa.forEach(r),Ho=m($t),we=n($t,"P",{});var Wr=o(we);Bo=i(Wr,"Note: "),_r=n(Wr,"EM",{});var $i=o(_r);Jo=i($i,"power"),$i.forEach(r),Ko=i(Wr,` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at `),lt=n(Wr,"A",{href:!0,rel:!0});var xi=o(lt);Qo=i(xi,"https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),xi.forEach(r),Wr.forEach(r),$t.forEach(r),ta=m(e),Z=n(e,"H3",{class:!0});var Wa=o(Z);ve=n(Wa,"A",{id:!0,class:!0,href:!0});var Ai=o(ve);wr=n(Ai,"SPAN",{});var zi=o(wr);_(mt.$$.fragment,zi),zi.forEach(r),Ai.forEach(r),Xo=m(Wa),vr=n(Wa,"SPAN",{});var Ei=o(vr);Yo=i(Ei,"Warmup (TensorFlow)"),Ei.forEach(r),Wa.forEach(r),ra=m(e),ee=n(e,"DIV",{class:!0});var ka=o(ee);_(ct.$$.fragment,ka),Zo=m(ka),br=n(ka,"P",{});var Ti=o(br);es=i(Ti,"Applies a warmup schedule on a given learning rate decay schedule."),Ti.forEach(r),ka.forEach(r),aa=m(e),te=n(e,"H2",{class:!0});var Sa=o(te);be=n(Sa,"A",{id:!0,class:!0,href:!0});var Di=o(be);yr=n(Di,"SPAN",{});var Li=o(yr);_(pt.$$.fragment,Li),Li.forEach(r),Di.forEach(r),ts=m(Sa),$r=n(Sa,"SPAN",{});var Pi=o($r);rs=i(Pi,"Gradient Strategies"),Pi.forEach(r),Sa.forEach(r),na=m(e),re=n(e,"H3",{class:!0});var Na=o(re);ye=n(Na,"A",{id:!0,class:!0,href:!0});var Wi=o(ye);xr=n(Wi,"SPAN",{});var ki=o(xr);_(dt.$$.fragment,ki),ki.forEach(r),Wi.forEach(r),as=m(Na),Ar=n(Na,"SPAN",{});var Si=o(Ar);ns=i(Si,"GradientAccumulator (TensorFlow)"),Si.forEach(r),Na.forEach(r),oa=m(e),I=n(e,"DIV",{class:!0});var xt=o(I);_(ht.$$.fragment,xt),os=m(xt),ae=n(xt,"P",{});var At=o(ae);ss=i(At,`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. Gradients will be accumulated locally on each replica and without synchronization. 
Users should then call `),zr=n(At,"CODE",{});var Ni=o(zr);is=i(Ni,".gradients"),Ni.forEach(r),ls=i(At,", scale the gradients if required, and pass the result to "),Er=n(At,"CODE",{});var Ii=o(Er);ms=i(Ii,"apply_gradients"),Ii.forEach(r),cs=i(At,"."),At.forEach(r),ps=m(xt),$e=n(xt,"DIV",{class:!0});var Ia=o($e);_(ut.$$.fragment,Ia),ds=m(Ia),Tr=n(Ia,"P",{});var Ci=o(Tr);hs=i(Ci,"Resets the accumulated gradients on the current replica."),Ci.forEach(r),Ia.forEach(r),xt.forEach(r),this.h()},h(){c(f,"name","hf:doc:metadata"),c(f,"content",JSON.stringify(Bi)),c(u,"id","optimization"),c(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(u,"href","#optimization"),c(x,"class","relative group"),c(se,"id","transformers.AdamW"),c(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(se,"href","#transformers.AdamW"),c(R,"class","relative group"),c(Le,"href","https://arxiv.org/abs/1711.05101"),c(Le,"rel","nofollow"),c(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(le,"id","transformers.Adafactor"),c(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(le,"href","#transformers.Adafactor"),c(j,"class","relative group"),c(Se,"href","https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),c(Se,"rel","nofollow"),c(Ne,"href","https://arxiv.org/abs/1804.04235"),c(Ne,"rel","nofollow"),c(Ce,"href","https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),c(Ce,"rel","nofollow"),c(je,"href","https://arxiv.org/abs/1804.04235"),c(je,"rel","nofollow"),c(wt,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),c(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ue,"id","transformers.AdamWeightDecay"),c(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ue,"href","#transformers.AdamWeightDecay"),c(q,"class","relative group"),c(Ve,"href","https://arxiv.org/abs/1711.05101"),c(Ve,"rel","nofollow"),c(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ge,"id","schedules"),c(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ge,"href","#schedules"),c(V,"class","relative group"),c(_e,"id","transformers.SchedulerType"),c(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_e,"href","#transformers.SchedulerType"),c(M,"class","relative group"),c(H,"class","docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ze,"alt",""),Ca(Ze.src,fs="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png")||c(Ze,"src",fs),c(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(tt,"alt",""),Ca(tt.src,gs="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png")||c(tt,"src",gs),c(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(at,"alt",""),Ca(at.src,_s="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png")||c(at,"src",_s),c(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ot,"alt",""),Ca(ot.src,ws="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png")||c(ot,"src",ws),c(lt,"href","https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),c(lt,"rel","nofollow"),c(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ve,"id","transformers.WarmUp"),c(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ve,"href","#transformers.WarmUp"),c(Z,"class","relative group"),c(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(be,"id","gradient-strategies"),c(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(be,"href","#gradient-strategies"),c(te,"class","relative group"),c(ye,"id","transformers.GradientAccumulator"),c(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ye,"href","#transformers.GradientAccumulator"),c(re,"class","relative group"),c($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,d){t(document.head,f),h(e,T,d),h(e,x,d),t(x,u),t(u,z),w(p,z,null),t(x,$),t(x,zt),t(zt,ja),h(e,kr,d),h(e,oe,d),t(oe,qa),t(oe,Et),t(Et,Ua),t(oe,Ga),h(e,Sr,d),h(e,F,d),t(F,Tt),t(Tt,Va),t(F,Ma),t(F,ze),t(ze,Ha),t(ze,Dt),t(Dt,Ba),t(ze,Ja),t(F,Ka),t(F,Lt),t(Lt,Qa),h(e,Nr,d),h(e,R,d),t(R,se),t(se,Pt),w(Ee,Pt,null),t(R,Xa),t(R,Wt),t(Wt,Ya),h(e,Ir,d),h(e,k,d),w(Te,k,null),t(k,Za),t(k,De),t(De,en),t(De,Le),t(Le,tn),t(De,rn),t(k,an),t(k,ie),w(Pe,ie,null),t(ie,nn),t(ie,kt),t(kt,on),h(e,Cr,d),h(e,j,d),t(j,le),t(le,St),w(We,St,null),t(j,sn),t(j,Nt),t(Nt,ln),h(e,Fr,d),h(e,A,d),w(ke,A,null),t(A,mn),t(A,_t),t(_t,cn),t(_t,Se),t(Se,pn),t(A,dn),t(A,D),t(D,hn),t(D,It),t(It,un),t(D,fn),t(D,Ne),t(Ne,gn),t(D,_n),t(D,Ct),t(Ct,wn),t(D,vn),t(D,Ft),t(Ft,bn),t(D,yn),t(D,Ot),t(Ot,$n),t(D,xn),t(D,Rt),t(Rt,An),t(D,zn),t(D,jt),t(jt,En),t(D,Tn),t(A,Dn),t(A,qt),t(qt,Ln),t(A,Pn),t(A,Ie),t(Ie,Wn),t(Ie,Ce),t(Ce,kn),t(Ie,Sn),t(A,Nn),t(A,S),t(S,Fe),t(Fe,Ut),t(Ut,In),t(Fe,Cn),t(Fe,Oe),t(Oe,Gt),t(Gt,Fn),t(Oe,On),t(Oe,Re),t(Re,Rn),t(Re,je),t(je,jn),t(Re,qn),t(S,Un),t(S,Vt),t(Vt,Mt),t(Mt,Gn),t(S,Vn),t(S,Ht),t(Ht,Bt),t(Bt,Mn),t(S,Hn),t(S,Jt),t(Jt,Kt),t(Kt,Bn),t(A,Jn),w(me,A,null),t(A,Kn),w(ce,A,null),t(A,Qn),t(A,O),t(O,Xn),t(O,Qt),t(Qt,Yn),t(O,Zn),t(O,wt),t(wt,eo),t(O,to),t(O,Xt),t(Xt,ro),t(A,ao),w(pe,A,null),t(A,no),w(de,A,null),t(A,oo),t(A,he),w(qe,he,null),t(he,so),t(he,Yt),t(Yt,io),h(e,Or,d),h(e,q,d),t(q,ue),t(ue,Zt),w(Ue,Zt,null),t(q,lo),t(q,er),t(er,mo),h(e,Rr,d),h(e,W,d),w(Ge,W,null),t(W,co),t(W,U),t(U,po),t(U,tr),t(tr,ho),t(U,uo),t(U,Ve),t(Ve,fo),t(U,go),t(W,_o),t(W,rr),t(rr,wo),t(W,vo),t(W,fe),w(Me,fe,null),t(fe,bo),t(fe,ar),t(ar,yo),h(e,jr,d),h(e,G,d),w(He,G,null),t(G,$o),t(G,nr),t(nr,xo),h(e,qr,d),h(e,V,d),t(V,ge),t(ge,or),w(Be,or,null),t(V,Ao),t(V,sr),t(sr,zo),h(e,Ur,d),h(e,M,d),t(M,_e),t(_e,ir),w(Je,ir,null),t(M,Eo),t(M,lr),t(lr,To),h(e,Gr,d),h(e,H,d),w(Ke,H,null),t(H,Do),t(H,mr),t(mr,Lo),h(e,Vr,d),h(e,B,d),w(Qe,B,null),t(B,Po),t(B,cr),t(cr,Wo),h(e,Mr,d),h(e,J,d),w(Xe,J,null),t(J,ko),t(J,pr),t(pr,So),h(e,Hr,d),h(e,K,d),w(Ye,K,null),t(K,No),t(K,dr),t(dr,Io),h(e,Br,d),h(e,Ze,d),h(e,Jr,d),h(e,Q,d),w(et,Q,null),t(Q,Co),t(Q,hr),t(hr,Fo),h(e,Kr,d),h(e,tt,d),h(e,Qr,d),h(e,X,d),w(rt,X,null),t(X,Oo),t(X,ur),t(ur,Ro),h(e,Xr,d),h(e,at,d),h(e,Yr,d),h(e,Y,d),w(nt,Y,null),t(Y,jo),t(Y,fr),t(fr,qo),h(e,Zr,d),h(e,ot,d),h(e,ea,d),h(e,N,d),w(st,N,null),t(N,Uo),t(N,it),t(it,Go),t(it,gr),t(gr,Vo),t(it,Mo),t(N,Ho),t(N,we),t(we,Bo),t(we,_r),t(_r,Jo),t(we,Ko),t(we,lt),t(lt,Qo),h(e,ta,d),h(e,Z,d),t(Z,ve),t(ve,wr),w(mt,wr,null),t(Z,Xo),t(Z,vr),t(vr,Yo),h(e,ra,d),h(e,ee,d),w(ct,ee,null),t(ee,Zo),t(ee,br),t(br,es),h(e,aa,d),h(e,te,d),t(te,be),t(be,yr),w(pt,yr,null),t(te,ts),t(te,$r),t($r,rs),h(e,na,d),h(e,re,d),t(re,ye),t(ye,xr),w(dt,xr,null),t(re,as),t(re,Ar),t(Ar,ns),h(e,oa,d),h(e,I,d),w(ht,I,null),t(I,os),t(I,ae),t(ae,ss),t(ae,zr),t(zr,is),t(ae,ls),t(ae,Er),t(Er,ms),t(ae,cs),t(I,ps),t(I,$e),w(ut,$e,null),t($e,ds),t($e,Tr),t(Tr,hs),sa=!0},p(e,[d]){const ft={};d&2&&(ft.$$scope={dirty:d,ctx:e}),me.$set(ft);const Dr={};d&2&&(Dr.$$scope={dirty:d,ctx:e}),ce.$set(Dr);const Lr={};d&2&&(Lr.$$scope={dirty:d,ctx:e}),pe.$set(Lr);const 
Pr={};d&2&&(Pr.$$scope={dirty:d,ctx:e}),de.$set(Pr)},i(e){sa||(v(p.$$.fragment,e),v(Ee.$$.fragment,e),v(Te.$$.fragment,e),v(Pe.$$.fragment,e),v(We.$$.fragment,e),v(ke.$$.fragment,e),v(me.$$.fragment,e),v(ce.$$.fragment,e),v(pe.$$.fragment,e),v(de.$$.fragment,e),v(qe.$$.fragment,e),v(Ue.$$.fragment,e),v(Ge.$$.fragment,e),v(Me.$$.fragment,e),v(He.$$.fragment,e),v(Be.$$.fragment,e),v(Je.$$.fragment,e),v(Ke.$$.fragment,e),v(Qe.$$.fragment,e),v(Xe.$$.fragment,e),v(Ye.$$.fragment,e),v(et.$$.fragment,e),v(rt.$$.fragment,e),v(nt.$$.fragment,e),v(st.$$.fragment,e),v(mt.$$.fragment,e),v(ct.$$.fragment,e),v(pt.$$.fragment,e),v(dt.$$.fragment,e),v(ht.$$.fragment,e),v(ut.$$.fragment,e),sa=!0)},o(e){b(p.$$.fragment,e),b(Ee.$$.fragment,e),b(Te.$$.fragment,e),b(Pe.$$.fragment,e),b(We.$$.fragment,e),b(ke.$$.fragment,e),b(me.$$.fragment,e),b(ce.$$.fragment,e),b(pe.$$.fragment,e),b(de.$$.fragment,e),b(qe.$$.fragment,e),b(Ue.$$.fragment,e),b(Ge.$$.fragment,e),b(Me.$$.fragment,e),b(He.$$.fragment,e),b(Be.$$.fragment,e),b(Je.$$.fragment,e),b(Ke.$$.fragment,e),b(Qe.$$.fragment,e),b(Xe.$$.fragment,e),b(Ye.$$.fragment,e),b(et.$$.fragment,e),b(rt.$$.fragment,e),b(nt.$$.fragment,e),b(st.$$.fragment,e),b(mt.$$.fragment,e),b(ct.$$.fragment,e),b(pt.$$.fragment,e),b(dt.$$.fragment,e),b(ht.$$.fragment,e),b(ut.$$.fragment,e),sa=!1},d(e){r(f),e&&r(T),e&&r(x),y(p),e&&r(kr),e&&r(oe),e&&r(Sr),e&&r(F),e&&r(Nr),e&&r(R),y(Ee),e&&r(Ir),e&&r(k),y(Te),y(Pe),e&&r(Cr),e&&r(j),y(We),e&&r(Fr),e&&r(A),y(ke),y(me),y(ce),y(pe),y(de),y(qe),e&&r(Or),e&&r(q),y(Ue),e&&r(Rr),e&&r(W),y(Ge),y(Me),e&&r(jr),e&&r(G),y(He),e&&r(qr),e&&r(V),y(Be),e&&r(Ur),e&&r(M),y(Je),e&&r(Gr),e&&r(H),y(Ke),e&&r(Vr),e&&r(B),y(Qe),e&&r(Mr),e&&r(J),y(Xe),e&&r(Hr),e&&r(K),y(Ye),e&&r(Br),e&&r(Ze),e&&r(Jr),e&&r(Q),y(et),e&&r(Kr),e&&r(tt),e&&r(Qr),e&&r(X),y(rt),e&&r(Xr),e&&r(at),e&&r(Yr),e&&r(Y),y(nt),e&&r(Zr),e&&r(ot),e&&r(ea),e&&r(N),y(st),e&&r(ta),e&&r(Z),y(mt),e&&r(ra),e&&r(ee),y(ct),e&&r(aa),e&&r(te),y(pt),e&&r(na),e&&r(re),y(dt),e&&r(oa),e&&r(I),y(ht),y(ut)}}}const Bi={local:"optimization",sections:[{local:"transformers.AdamW",title:"AdamW (PyTorch)"},{local:"transformers.Adafactor",title:"AdaFactor (PyTorch)"},{local:"transformers.AdamWeightDecay",title:"AdamWeightDecay (TensorFlow)"},{local:"schedules",sections:[{local:"transformers.SchedulerType",title:"Learning Rate Schedules (Pytorch)"},{local:"transformers.WarmUp",title:"Warmup (TensorFlow)"}],title:"Schedules"},{local:"gradient-strategies",sections:[{local:"transformers.GradientAccumulator",title:"GradientAccumulator (TensorFlow)"}],title:"Gradient Strategies"}],title:"Optimization"};function Ji(C){return qi(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class el extends Fi{constructor(f){super();Oi(this,f,Ji,Hi,Ri,{})}}export{el as default,Bi as metadata};
18
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/keras_callbacks.mdx-hf-doc-builder.js
import{S as it,i as dt,s as pt,e as s,k as f,w as $,t as p,M as mt,c as n,d as a,m as b,a as l,x as w,h as m,b as h,G as t,g as _,y,q as x,o as C,B as T,v as ut,L as We}from"../../chunks/vendor-hf-doc-builder.js";import{D as ct}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Ge}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Be}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Fe}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function ht(O){let r,u;return r=new Ge({props:{code:`from datasets import load_metric rouge_metric = load_metric("rouge") def rouge_fn(predictions, labels): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) return {key: value.mid.fmeasure * 100 for key, value in result.items()}`,highlighted:`<span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric rouge_metric = load_metric(<span class="hljs-string">&quot;rouge&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">rouge_fn</span>(<span class="hljs-params">predictions, labels</span>): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=<span class="hljs-literal">True</span>) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=<span class="hljs-literal">True</span>) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) <span class="hljs-keyword">return</span> {key: value.mid.fmeasure * <span class="hljs-number">100</span> <span class="hljs-keyword">for</span> key, value <span class="hljs-keyword">in</span> result.items()}`}}),{c(){$(r.$$.fragment)},l(o){w(r.$$.fragment,o)},m(o,d){y(r,o,d),u=!0},p:We,i(o){u||(x(r.$$.fragment,o),u=!0)},o(o){C(r.$$.fragment,o),u=!1},d(o){T(r,o)}}}function ft(O){let r,u,o,d,P;return d=new Ge({props:{code:"{'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781",highlighted:'{&#x27;rouge1&#x27;: <span class="hljs-number">37.4199</span>, &#x27;rouge2&#x27;: <span class="hljs-number">13.9768</span>, &#x27;rougeL&#x27;: <span class="hljs-number">34.361</span>, &#x27;rougeLsum&#x27;: <span class="hljs-number">35.0781</span>'}}),{c(){r=s("p"),u=p("The above function will return a dict containing values which will be logged like any other Keras metric:"),o=f(),$(d.$$.fragment)},l(c){r=n(c,"P",{});var E=l(r);u=m(E,"The above function will return a dict containing values which will be logged like any other Keras metric:"),E.forEach(a),o=b(c),w(d.$$.fragment,c)},m(c,E){_(c,r,E),t(r,u),_(c,o,E),y(d,c,E),P=!0},p:We,i(c){P||(x(d.$$.fragment,c),P=!0)},o(c){C(d.$$.fragment,c),P=!1},d(c){c&&a(r),c&&a(o),T(d,c)}}}function bt(O){let r,u;return r=new Ge({props:{code:`from transformers.keras_callbacks import PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir="./model_save", tokenizer=tokenizer, hub_model_id="gpt5-7xlarge", ) model.fit(train_dataset, callbacks=[push_to_hub_callback])`,highlighted:`<span class="hljs-keyword">from</span> transformers.keras_callbacks <span class="hljs-keyword">import</span> PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir=<span class="hljs-string">&quot;./model_save&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;gpt5-7xlarge&quot;</span>, ) model.fit(train_dataset, 
callbacks=[push_to_hub_callback])`}}),{c(){$(r.$$.fragment)},l(o){w(r.$$.fragment,o)},m(o,d){y(r,o,d),u=!0},p:We,i(o){u||(x(r.$$.fragment,o),u=!0)},o(o){C(r.$$.fragment,o),u=!1},d(o){T(r,o)}}}function gt(O){let r,u,o,d,P,c,E,V,be,ce,W,ge,ie,H,j,X,A,_e,J,ke,de,g,I,ve,k,$e,Q,we,ye,Y,xe,Ce,Z,Te,Pe,ee,Ee,Ke,ze,te,He,qe,L,Me,D,pe,q,N,ae,U,Oe,oe,je,me,v,B,Le,M,De,re,Ne,Se,se,Ae,Ie,Ue,S,ue;return c=new Be({}),A=new Be({}),I=new ct({props:{name:"class transformers.KerasMetricCallback",anchor:"transformers.KerasMetricCallback",parameters:[{name:"metric_fn",val:": typing.Callable"},{name:"eval_dataset",val:": typing.Union[tensorflow.python.data.ops.dataset_ops.DatasetV2, numpy.ndarray, tensorflow.python.framework.ops.Tensor, tuple, dict]"},{name:"output_cols",val:": typing.Optional[typing.List[str]] = None"},{name:"label_cols",val:": typing.Optional[typing.List[str]] = None"},{name:"batch_size",val:": typing.Optional[int] = None"},{name:"predict_with_generate",val:": bool = False"},{name:"use_xla_generation",val:": bool = False"},{name:"generate_kwargs",val:": typing.Optional[dict] = None"}],parametersDescription:[{anchor:"transformers.KerasMetricCallback.metric_fn",description:`<strong>metric_fn</strong> (<code>Callable</code>) &#x2014; Metric function provided by the user. It will be called with two arguments - <code>predictions</code> and <code>labels</code>. These contain the model&#x2019;s outputs and matching labels from the dataset. It should return a dict mapping metric names to numerical values.`,name:"metric_fn"},{anchor:"transformers.KerasMetricCallback.eval_dataset",description:`<strong>eval_dataset</strong> (<code>tf.data.Dataset</code> or <code>dict</code> or <code>tuple</code> or <code>np.ndarray</code> or <code>tf.Tensor</code>) &#x2014; Validation data to be used to generate predictions for the <code>metric_fn</code>.`,name:"eval_dataset"},{anchor:"transformers.KerasMetricCallback.output_cols",description:"<strong>output_cols</strong> (`List[str], <em>optional</em>) &#x2014;\nA list of columns to be retained from the model output as the predictions. Defaults to all.",name:"output_cols"},{anchor:"transformers.KerasMetricCallback.label_cols",description:`<strong>label_cols</strong> (&#x2019;<code>List[str]</code>, <em>optional</em>&#x2019;) &#x2014; A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not supplied.`,name:"label_cols"},{anchor:"transformers.KerasMetricCallback.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Batch size. Only used when the data is not a pre-batched <code>tf.data.Dataset</code>.`,name:"batch_size"},{anchor:"transformers.KerasMetricCallback.predict_with_generate",description:`<strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we should use <code>model.generate()</code> to get outputs for the model.`,name:"predict_with_generate"},{anchor:"transformers.KerasMetricCallback.use_xla_generation",description:`<strong>use_xla_generation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If we&#x2019;re generating, whether to compile model generation with XLA. This can massively increase the speed of generation (up to 100X speedup) but will require a new XLA compilation for each input shape. 
When using XLA generation, it&#x2019;s a good idea to pad your inputs to the same size, or to use the <code>pad_to_multiple_of</code> argument in your <code>tokenizer</code> or <code>DataCollator</code>, which will reduce the number of unique input shapes and save a lot of compilation time. This option has no effect is <code>predict_with_generate</code> is <code>False</code>.`,name:"use_xla_generation"},{anchor:"transformers.KerasMetricCallback.generate_kwargs",description:`<strong>generate_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Keyword arguments to pass to <code>model.generate()</code> when generating. Has no effect if <code>predict_with_generate</code> is <code>False</code>.`,name:"generate_kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/keras_callbacks.py#L22"}}),L=new Fe({props:{anchor:"transformers.KerasMetricCallback.example",$$slots:{default:[ht]},$$scope:{ctx:O}}}),D=new Fe({props:{anchor:"transformers.KerasMetricCallback.example-2",$$slots:{default:[ft]},$$scope:{ctx:O}}}),U=new Be({}),B=new ct({props:{name:"class transformers.PushToHubCallback",anchor:"transformers.PushToHubCallback",parameters:[{name:"output_dir",val:": typing.Union[str, pathlib.Path]"},{name:"save_strategy",val:": typing.Union[str, transformers.trainer_utils.IntervalStrategy] = 'epoch'"},{name:"save_steps",val:": typing.Optional[int] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"hub_model_id",val:": typing.Optional[str] = None"},{name:"hub_token",val:": typing.Optional[str] = None"},{name:"checkpoint",val:": bool = False"},{name:"**model_card_args",val:""}],parametersDescription:[{anchor:"transformers.PushToHubCallback.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written and synced with the repository on the Hub.`,name:"output_dir"},{anchor:"transformers.PushToHubCallback.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;epoch&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: Save is done at the end of training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code></li> </ul>`,name:"save_strategy"},{anchor:"transformers.PushToHubCallback.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of steps between saves when using the &#x201C;steps&#x201D; <code>save_strategy</code>.`,name:"save_steps"},{anchor:"transformers.PushToHubCallback.tokenizer",description:`<strong>tokenizer</strong> (<code>PreTrainedTokenizerBase</code>, <em>optional</em>) &#x2014; The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights.`,name:"tokenizer"},{anchor:"transformers.PushToHubCallback.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <code>output_dir</code>. It can be a simple model ID in which case the model will be pushed in your namespace. 
Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>.</p> <p>Will default to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.PushToHubCallback.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.PushToHubCallback.checkpoint",description:`<strong>checkpoint</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be resumed. Only usable when <code>save_strategy</code> is <code>&quot;epoch&quot;</code>.`,name:"checkpoint"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/keras_callbacks.py#L267"}}),S=new Fe({props:{anchor:"transformers.PushToHubCallback.example",$$slots:{default:[bt]},$$scope:{ctx:O}}}),{c(){r=s("meta"),u=f(),o=s("h1"),d=s("a"),P=s("span"),$(c.$$.fragment),E=f(),V=s("span"),be=p("Keras callbacks"),ce=f(),W=s("p"),ge=p(`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),ie=f(),H=s("h2"),j=s("a"),X=s("span"),$(A.$$.fragment),_e=f(),J=s("span"),ke=p("KerasMetricCallback"),de=f(),g=s("div"),$(I.$$.fragment),ve=f(),k=s("p"),$e=p(`Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the `),Q=s("code"),we=p("eval_dataset"),ye=p(" before being passed to the "),Y=s("code"),xe=p("metric_fn"),Ce=p(" in "),Z=s("code"),Te=p("np.ndarray"),Pe=p(" format. The "),ee=s("code"),Ee=p("metric_fn"),Ke=p(` should compute metrics and return a dict mapping metric names to metric values.`),ze=f(),te=s("p"),He=p(`We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!`),qe=f(),$(L.$$.fragment),Me=f(),$(D.$$.fragment),pe=f(),q=s("h2"),N=s("a"),ae=s("span"),$(U.$$.fragment),Oe=f(),oe=s("span"),je=p("PushToHubCallback"),me=f(),v=s("div"),$(B.$$.fragment),Le=f(),M=s("p"),De=p(`Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the `),re=s("code"),Ne=p("save_strategy"),Se=p(` argument. 
Pushed models can be accessed like any other model on the hub, such as with the `),se=s("code"),Ae=p("from_pretrained"),Ie=p(" method."),Ue=f(),$(S.$$.fragment),this.h()},l(e){const i=mt('[data-svelte="svelte-1phssyn"]',document.head);r=n(i,"META",{name:!0,content:!0}),i.forEach(a),u=b(e),o=n(e,"H1",{class:!0});var F=l(o);d=n(F,"A",{id:!0,class:!0,href:!0});var ne=l(d);P=n(ne,"SPAN",{});var le=l(P);w(c.$$.fragment,le),le.forEach(a),ne.forEach(a),E=b(F),V=n(F,"SPAN",{});var Re=l(V);be=m(Re,"Keras callbacks"),Re.forEach(a),F.forEach(a),ce=b(e),W=n(e,"P",{});var Ve=l(W);ge=m(Ve,`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),Ve.forEach(a),ie=b(e),H=n(e,"H2",{class:!0});var he=l(H);j=n(he,"A",{id:!0,class:!0,href:!0});var Xe=l(j);X=n(Xe,"SPAN",{});var Je=l(X);w(A.$$.fragment,Je),Je.forEach(a),Xe.forEach(a),_e=b(he),J=n(he,"SPAN",{});var Qe=l(J);ke=m(Qe,"KerasMetricCallback"),Qe.forEach(a),he.forEach(a),de=b(e),g=n(e,"DIV",{class:!0});var K=l(g);w(I.$$.fragment,K),ve=b(K),k=n(K,"P",{});var z=l(k);$e=m(z,`Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the `),Q=n(z,"CODE",{});var Ye=l(Q);we=m(Ye,"eval_dataset"),Ye.forEach(a),ye=m(z," before being passed to the "),Y=n(z,"CODE",{});var Ze=l(Y);xe=m(Ze,"metric_fn"),Ze.forEach(a),Ce=m(z," in "),Z=n(z,"CODE",{});var et=l(Z);Te=m(et,"np.ndarray"),et.forEach(a),Pe=m(z," format. The "),ee=n(z,"CODE",{});var tt=l(ee);Ee=m(tt,"metric_fn"),tt.forEach(a),Ke=m(z,` should compute metrics and return a dict mapping metric names to metric values.`),z.forEach(a),ze=b(K),te=n(K,"P",{});var at=l(te);He=m(at,`We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!`),at.forEach(a),qe=b(K),w(L.$$.fragment,K),Me=b(K),w(D.$$.fragment,K),K.forEach(a),pe=b(e),q=n(e,"H2",{class:!0});var fe=l(q);N=n(fe,"A",{id:!0,class:!0,href:!0});var ot=l(N);ae=n(ot,"SPAN",{});var rt=l(ae);w(U.$$.fragment,rt),rt.forEach(a),ot.forEach(a),Oe=b(fe),oe=n(fe,"SPAN",{});var st=l(oe);je=m(st,"PushToHubCallback"),st.forEach(a),fe.forEach(a),me=b(e),v=n(e,"DIV",{class:!0});var G=l(v);w(B.$$.fragment,G),Le=b(G),M=n(G,"P",{});var R=l(M);De=m(R,`Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the `),re=n(R,"CODE",{});var nt=l(re);Ne=m(nt,"save_strategy"),nt.forEach(a),Se=m(R,` argument. 
Pushed models can be accessed like any other model on the hub, such as with the `),se=n(R,"CODE",{});var lt=l(se);Ae=m(lt,"from_pretrained"),lt.forEach(a),Ie=m(R," method."),R.forEach(a),Ue=b(G),w(S.$$.fragment,G),G.forEach(a),this.h()},h(){h(r,"name","hf:doc:metadata"),h(r,"content",JSON.stringify(_t)),h(d,"id","keras-callbacks"),h(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(d,"href","#keras-callbacks"),h(o,"class","relative group"),h(j,"id","transformers.KerasMetricCallback"),h(j,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(j,"href","#transformers.KerasMetricCallback"),h(H,"class","relative group"),h(g,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(N,"id","transformers.PushToHubCallback"),h(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(N,"href","#transformers.PushToHubCallback"),h(q,"class","relative group"),h(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,i){t(document.head,r),_(e,u,i),_(e,o,i),t(o,d),t(d,P),y(c,P,null),t(o,E),t(o,V),t(V,be),_(e,ce,i),_(e,W,i),t(W,ge),_(e,ie,i),_(e,H,i),t(H,j),t(j,X),y(A,X,null),t(H,_e),t(H,J),t(J,ke),_(e,de,i),_(e,g,i),y(I,g,null),t(g,ve),t(g,k),t(k,$e),t(k,Q),t(Q,we),t(k,ye),t(k,Y),t(Y,xe),t(k,Ce),t(k,Z),t(Z,Te),t(k,Pe),t(k,ee),t(ee,Ee),t(k,Ke),t(g,ze),t(g,te),t(te,He),t(g,qe),y(L,g,null),t(g,Me),y(D,g,null),_(e,pe,i),_(e,q,i),t(q,N),t(N,ae),y(U,ae,null),t(q,Oe),t(q,oe),t(oe,je),_(e,me,i),_(e,v,i),y(B,v,null),t(v,Le),t(v,M),t(M,De),t(M,re),t(re,Ne),t(M,Se),t(M,se),t(se,Ae),t(M,Ie),t(v,Ue),y(S,v,null),ue=!0},p(e,[i]){const F={};i&2&&(F.$$scope={dirty:i,ctx:e}),L.$set(F);const ne={};i&2&&(ne.$$scope={dirty:i,ctx:e}),D.$set(ne);const le={};i&2&&(le.$$scope={dirty:i,ctx:e}),S.$set(le)},i(e){ue||(x(c.$$.fragment,e),x(A.$$.fragment,e),x(I.$$.fragment,e),x(L.$$.fragment,e),x(D.$$.fragment,e),x(U.$$.fragment,e),x(B.$$.fragment,e),x(S.$$.fragment,e),ue=!0)},o(e){C(c.$$.fragment,e),C(A.$$.fragment,e),C(I.$$.fragment,e),C(L.$$.fragment,e),C(D.$$.fragment,e),C(U.$$.fragment,e),C(B.$$.fragment,e),C(S.$$.fragment,e),ue=!1},d(e){a(r),e&&a(u),e&&a(o),T(c),e&&a(ce),e&&a(W),e&&a(ie),e&&a(H),T(A),e&&a(de),e&&a(g),T(I),T(L),T(D),e&&a(pe),e&&a(q),T(U),e&&a(me),e&&a(v),T(B),T(S)}}}const _t={local:"keras-callbacks",sections:[{local:"transformers.KerasMetricCallback",title:"KerasMetricCallback"},{local:"transformers.PushToHubCallback",title:"PushToHubCallback"}],title:"Keras callbacks"};function kt(O){return ut(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ct extends it{constructor(r){super();dt(this,r,kt,gt,pt,{})}}export{Ct as default,_t as metadata};
19
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/logging.mdx-hf-doc-builder.js
import{S as hl,i as vl,s as bl,e as o,k as f,w as h,t as l,M as _l,c as s,d as t,m,a,x as v,h as n,b as c,G as r,g,y as b,q as _,o as $,B as E,v as $l,L as El}from"../../chunks/vendor-hf-doc-builder.js";import{T as yl}from"../../chunks/Tip-hf-doc-builder.js";import{D as L}from"../../chunks/Docstring-hf-doc-builder.js";import{C as st}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Es}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as wl}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Rl(ir){let u,C,w,d,R,p,I,Z,Te,U,Y,Se,V,F,ee,re,Le,x,be,P,Ce,_e,S,$e,y,j,te,oe,Fe,se,z,xe,M,Pe;return{c(){u=o("p"),C=l("\u{1F917} Transformers has following logging levels:"),w=f(),d=o("ul"),R=o("li"),p=l("50: "),I=o("code"),Z=l("transformers.logging.CRITICAL"),Te=l(" or "),U=o("code"),Y=l("transformers.logging.FATAL"),Se=f(),V=o("li"),F=l("40: "),ee=o("code"),re=l("transformers.logging.ERROR"),Le=f(),x=o("li"),be=l("30: "),P=o("code"),Ce=l("transformers.logging.WARNING"),_e=l(" or "),S=o("code"),$e=l("transformers.logging.WARN"),y=f(),j=o("li"),te=l("20: "),oe=o("code"),Fe=l("transformers.logging.INFO"),se=f(),z=o("li"),xe=l("10: "),M=o("code"),Pe=l("transformers.logging.DEBUG")},l(N){u=s(N,"P",{});var k=a(u);C=n(k,"\u{1F917} Transformers has following logging levels:"),k.forEach(t),w=m(N),d=s(N,"UL",{});var O=a(d);R=s(O,"LI",{});var le=a(R);p=n(le,"50: "),I=s(le,"CODE",{});var Ee=a(I);Z=n(Ee,"transformers.logging.CRITICAL"),Ee.forEach(t),Te=n(le," or "),U=s(le,"CODE",{});var gr=a(U);Y=n(gr,"transformers.logging.FATAL"),gr.forEach(t),le.forEach(t),Se=m(O),V=s(O,"LI",{});var ke=a(V);F=n(ke,"40: "),ee=s(ke,"CODE",{});var ye=a(ee);re=n(ye,"transformers.logging.ERROR"),ye.forEach(t),ke.forEach(t),Le=m(O),x=s(O,"LI",{});var ne=a(x);be=n(ne,"30: "),P=s(ne,"CODE",{});var fr=a(P);Ce=n(fr,"transformers.logging.WARNING"),fr.forEach(t),_e=n(ne," or "),S=s(ne,"CODE",{});var Ge=a(S);$e=n(Ge,"transformers.logging.WARN"),Ge.forEach(t),ne.forEach(t),y=m(O),j=s(O,"LI",{});var B=a(j);te=n(B,"20: "),oe=s(B,"CODE",{});var Ve=a(oe);Fe=n(Ve,"transformers.logging.INFO"),Ve.forEach(t),B.forEach(t),se=m(O),z=s(O,"LI",{});var A=a(z);xe=n(A,"10: "),M=s(A,"CODE",{});var mr=a(M);Pe=n(mr,"transformers.logging.DEBUG"),mr.forEach(t),A.forEach(t),O.forEach(t)},m(N,k){g(N,u,k),r(u,C),g(N,w,k),g(N,d,k),r(d,R),r(R,p),r(R,I),r(I,Z),r(R,Te),r(R,U),r(U,Y),r(d,Se),r(d,V),r(V,F),r(V,ee),r(ee,re),r(d,Le),r(d,x),r(x,be),r(x,P),r(P,Ce),r(x,_e),r(x,S),r(S,$e),r(d,y),r(d,j),r(j,te),r(j,oe),r(oe,Fe),r(d,se),r(d,z),r(z,xe),r(z,M),r(M,Pe)},d(N){N&&t(u),N&&t(w),N&&t(d)}}}function Al(ir){let u,C,w,d,R;return d=new st({props:{code:" [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE",highlighted:' [LEVELNAME|<span class="hljs-type">FILENAME</span>|<span class="hljs-type">LINE</span> NUMBER] TIME &gt;&gt; MESSAGE'}}),{c(){u=o("p"),C=l("Enable explicit formatting for every HuggingFace Transformers\u2019s logger. The explicit formatter is as follows:"),w=f(),h(d.$$.fragment)},l(p){u=s(p,"P",{});var I=a(u);C=n(I,"Enable explicit formatting for every HuggingFace Transformers\u2019s logger. 
The explicit formatter is as follows:"),I.forEach(t),w=m(p),v(d.$$.fragment,p)},m(p,I){g(p,u,I),r(u,C),g(p,w,I),b(d,p,I),R=!0},p:El,i(p){R||(_(d.$$.fragment,p),R=!0)},o(p){$(d.$$.fragment,p),R=!1},d(p){p&&t(u),p&&t(w),E(d,p)}}}function Il(ir){let u,C,w,d,R,p,I,Z,Te,U,Y,Se,V,F,ee,re,Le,x,be,P,Ce,_e,S,$e,y,j,te,oe,Fe,se,z,xe,M,Pe,N,k,O,le,Ee,gr,ke,ye,ne,fr,Ge,B,Ve,A,mr,Rr,qt,Ht,Ar,Ut,Yt,Ir,jt,zt,Nr,Jt,Kt,lt,Me,nt,dr,Qt,at,Be,it,J,Xt,cr,Zt,eo,pr,ro,to,gt,D,we,Or,oo,so,Dr,lo,no,ao,ur,Tr,io,go,fo,Re,Sr,mo,co,Lr,po,uo,ho,hr,Cr,vo,bo,_o,vr,Fr,$o,Eo,ft,G,yo,xr,wo,Ro,br,Ao,Io,_r,No,Oo,mt,ae,Ae,Pr,We,Do,kr,To,dt,ie,qe,So,He,Lo,Gr,Co,Fo,ct,ge,Ue,xo,Ye,Po,Vr,ko,Go,pt,fe,je,Vo,ze,Mo,Mr,Bo,Wo,ut,me,Je,qo,Ke,Ho,Br,Uo,Yo,ht,de,Ie,Wr,Qe,jo,qr,zo,vt,W,Xe,Jo,Hr,Ko,Qo,Ne,bt,ce,Ze,Xo,Ur,Zo,_t,q,er,es,Yr,rs,ts,jr,os,$t,pe,rr,ss,zr,ls,Et,ue,tr,ns,Jr,as,yt,K,or,is,Oe,gs,wt,H,sr,fs,Kr,ms,ds,Qr,cs,Rt,he,lr,ps,Xr,us,At,ve,nr,hs,Zr,vs,It;return p=new Es({}),S=new st({props:{code:`import transformers transformers.logging.set_verbosity_info()`,highlighted:`<span class="hljs-keyword">import</span> transformers transformers.logging.set_verbosity_info()`}}),B=new st({props:{code:"TRANSFORMERS_VERBOSITY=error ./myprogram.py",highlighted:"TRANSFORMERS_VERBOSITY=error ./myprogram.py"}}),Me=new st({props:{code:"TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py",highlighted:"TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py"}}),Be=new st({props:{code:`from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger("transformers") logger.info("INFO") logger.warning("WARN")`,highlighted:`<span class="hljs-keyword">from</span> transformers.utils <span class="hljs-keyword">import</span> logging logging.set_verbosity_info() logger = logging.get_logger(<span class="hljs-string">&quot;transformers&quot;</span>) logger.info(<span class="hljs-string">&quot;INFO&quot;</span>) logger.warning(<span class="hljs-string">&quot;WARN&quot;</span>)`}}),We=new Es({}),qe=new L({props:{name:"transformers.utils.logging.set_verbosity_error",anchor:"transformers.utils.logging.set_verbosity_error",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L186"}}),Ue=new L({props:{name:"transformers.utils.logging.set_verbosity_warning",anchor:"transformers.utils.logging.set_verbosity_warning",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L176"}}),je=new L({props:{name:"transformers.utils.logging.set_verbosity_info",anchor:"transformers.utils.logging.set_verbosity_info",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L171"}}),Je=new L({props:{name:"transformers.utils.logging.set_verbosity_debug",anchor:"transformers.utils.logging.set_verbosity_debug",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L181"}}),Qe=new Es({}),Xe=new L({props:{name:"transformers.utils.logging.get_verbosity",anchor:"transformers.utils.logging.get_verbosity",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L129",returnDescription:` <p>The logging level.</p> `,returnType:` <p><code>int</code></p> `}}),Ne=new yl({props:{$$slots:{default:[Rl]},$$scope:{ctx:ir}}}),Ze=new L({props:{name:"transformers.utils.logging.set_verbosity",anchor:"transformers.utils.logging.set_verbosity",parameters:[{name:"verbosity",val:": 
int"}],parametersDescription:[{anchor:"transformers.utils.logging.set_verbosity.verbosity",description:`<strong>verbosity</strong> (<code>int</code>) &#x2014; Logging level, e.g., one of:</p> <ul> <li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li><code>transformers.logging.ERROR</code></li> <li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li><code>transformers.logging.INFO</code></li> <li><code>transformers.logging.DEBUG</code></li> </ul>`,name:"verbosity"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L152"}}),er=new L({props:{name:"transformers.utils.logging.get_logger",anchor:"transformers.utils.logging.get_logger",parameters:[{name:"name",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L115"}}),rr=new L({props:{name:"transformers.utils.logging.enable_default_handler",anchor:"transformers.utils.logging.enable_default_handler",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L200"}}),tr=new L({props:{name:"transformers.utils.logging.disable_default_handler",anchor:"transformers.utils.logging.disable_default_handler",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L191"}}),or=new L({props:{name:"transformers.utils.logging.enable_explicit_format",anchor:"transformers.utils.logging.enable_explicit_format",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L246"}}),Oe=new wl({props:{anchor:"transformers.utils.logging.enable_explicit_format.example",$$slots:{default:[Al]},$$scope:{ctx:ir}}}),sr=new L({props:{name:"transformers.utils.logging.reset_format",anchor:"transformers.utils.logging.reset_format",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L261"}}),lr=new L({props:{name:"transformers.utils.logging.enable_progress_bar",anchor:"transformers.utils.logging.enable_progress_bar",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L337"}}),nr=new L({props:{name:"transformers.utils.logging.disable_progress_bar",anchor:"transformers.utils.logging.disable_progress_bar",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L344"}}),{c(){u=o("meta"),C=f(),w=o("h1"),d=o("a"),R=o("span"),h(p.$$.fragment),I=f(),Z=o("span"),Te=l("Logging"),U=f(),Y=o("p"),Se=l("\u{1F917} Transformers has a centralized logging system, so that you can setup the verbosity of the library easily."),V=f(),F=o("p"),ee=l("Currently the default verbosity of the library is "),re=o("code"),Le=l("WARNING"),x=l("."),be=f(),P=o("p"),Ce=l(`To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity to the INFO level.`),_e=f(),h(S.$$.fragment),$e=f(),y=o("p"),j=l("You can also use the environment variable "),te=o("code"),oe=l("TRANSFORMERS_VERBOSITY"),Fe=l(` to override the default verbosity. You can set it to one of the following: `),se=o("code"),z=l("debug"),xe=l(", "),M=o("code"),Pe=l("info"),N=l(", "),k=o("code"),O=l("warning"),le=l(", "),Ee=o("code"),gr=l("error"),ke=l(", "),ye=o("code"),ne=l("critical"),fr=l(". 
For example:"),Ge=f(),h(B.$$.fragment),Ve=f(),A=o("p"),mr=l("Additionally, some "),Rr=o("code"),qt=l("warnings"),Ht=l(` can be disabled by setting the environment variable `),Ar=o("code"),Ut=l("TRANSFORMERS_NO_ADVISORY_WARNINGS"),Yt=l(" to a true value, like "),Ir=o("em"),jt=l("1"),zt=l(`. This will disable any warning that is logged using `),Nr=o("code"),Jt=l("logger.warning_advice()"),Kt=l(". For example:"),lt=f(),h(Me.$$.fragment),nt=f(),dr=o("p"),Qt=l("Here is an example of how to use the same logger as the library in your own module or script:"),at=f(),h(Be.$$.fragment),it=f(),J=o("p"),Xt=l(`All the methods of this logging module are documented below, the main ones are `),cr=o("a"),Zt=l("logging.get_verbosity()"),eo=l(` to get the current level of verbosity in the logger and `),pr=o("a"),ro=l("logging.set_verbosity()"),to=l(` to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parenthesis) are:`),gt=f(),D=o("ul"),we=o("li"),Or=o("code"),oo=l("transformers.logging.CRITICAL"),so=l(" or "),Dr=o("code"),lo=l("transformers.logging.FATAL"),no=l(` (int value, 50): only report the most critical errors.`),ao=f(),ur=o("li"),Tr=o("code"),io=l("transformers.logging.ERROR"),go=l(" (int value, 40): only report errors."),fo=f(),Re=o("li"),Sr=o("code"),mo=l("transformers.logging.WARNING"),co=l(" or "),Lr=o("code"),po=l("transformers.logging.WARN"),uo=l(` (int value, 30): only reports error and warnings. This the default level used by the library.`),ho=f(),hr=o("li"),Cr=o("code"),vo=l("transformers.logging.INFO"),bo=l(" (int value, 20): reports error, warnings and basic information."),_o=f(),vr=o("li"),Fr=o("code"),$o=l("transformers.logging.DEBUG"),Eo=l(" (int value, 10): report all information."),ft=f(),G=o("p"),yo=l("By default, "),xr=o("code"),wo=l("tqdm"),Ro=l(" progress bars will be displayed during model download. 
"),br=o("a"),Ao=l("logging.disable_progress_bar()"),Io=l(" and "),_r=o("a"),No=l("logging.enable_progress_bar()"),Oo=l(" can be used to suppress or unsuppress this behavior."),mt=f(),ae=o("h2"),Ae=o("a"),Pr=o("span"),h(We.$$.fragment),Do=f(),kr=o("span"),To=l("Base setters"),dt=f(),ie=o("div"),h(qe.$$.fragment),So=f(),He=o("p"),Lo=l("Set the verbosity to the "),Gr=o("code"),Co=l("ERROR"),Fo=l(" level."),ct=f(),ge=o("div"),h(Ue.$$.fragment),xo=f(),Ye=o("p"),Po=l("Set the verbosity to the "),Vr=o("code"),ko=l("WARNING"),Go=l(" level."),pt=f(),fe=o("div"),h(je.$$.fragment),Vo=f(),ze=o("p"),Mo=l("Set the verbosity to the "),Mr=o("code"),Bo=l("INFO"),Wo=l(" level."),ut=f(),me=o("div"),h(Je.$$.fragment),qo=f(),Ke=o("p"),Ho=l("Set the verbosity to the "),Br=o("code"),Uo=l("DEBUG"),Yo=l(" level."),ht=f(),de=o("h2"),Ie=o("a"),Wr=o("span"),h(Qe.$$.fragment),jo=f(),qr=o("span"),zo=l("Other functions"),vt=f(),W=o("div"),h(Xe.$$.fragment),Jo=f(),Hr=o("p"),Ko=l("Return the current level for the \u{1F917} Transformers\u2019s root logger as an int."),Qo=f(),h(Ne.$$.fragment),bt=f(),ce=o("div"),h(Ze.$$.fragment),Xo=f(),Ur=o("p"),Zo=l("Set the verbosity level for the \u{1F917} Transformers\u2019s root logger."),_t=f(),q=o("div"),h(er.$$.fragment),es=f(),Yr=o("p"),rs=l("Return a logger with the specified name."),ts=f(),jr=o("p"),os=l("This function is not supposed to be directly accessed unless you are writing a custom transformers module."),$t=f(),pe=o("div"),h(rr.$$.fragment),ss=f(),zr=o("p"),ls=l("Enable the default handler of the HuggingFace Transformers\u2019s root logger."),Et=f(),ue=o("div"),h(tr.$$.fragment),ns=f(),Jr=o("p"),as=l("Disable the default handler of the HuggingFace Transformers\u2019s root logger."),yt=f(),K=o("div"),h(or.$$.fragment),is=f(),h(Oe.$$.fragment),gs=l(` All handlers currently bound to the root logger are affected by this method.`),wt=f(),H=o("div"),h(sr.$$.fragment),fs=f(),Kr=o("p"),ms=l("Resets the formatting for HuggingFace Transformers\u2019s loggers."),ds=f(),Qr=o("p"),cs=l("All handlers currently bound to the root logger are affected by this method."),Rt=f(),he=o("div"),h(lr.$$.fragment),ps=f(),Xr=o("p"),us=l("Enable tqdm progress bar."),At=f(),ve=o("div"),h(nr.$$.fragment),hs=f(),Zr=o("p"),vs=l("Disable tqdm progress bar."),this.h()},l(e){const i=_l('[data-svelte="svelte-1phssyn"]',document.head);u=s(i,"META",{name:!0,content:!0}),i.forEach(t),C=m(e),w=s(e,"H1",{class:!0});var ar=a(w);d=s(ar,"A",{id:!0,class:!0,href:!0});var et=a(d);R=s(et,"SPAN",{});var ys=a(R);v(p.$$.fragment,ys),ys.forEach(t),et.forEach(t),I=m(ar),Z=s(ar,"SPAN",{});var ws=a(Z);Te=n(ws,"Logging"),ws.forEach(t),ar.forEach(t),U=m(e),Y=s(e,"P",{});var Rs=a(Y);Se=n(Rs,"\u{1F917} Transformers has a centralized logging system, so that you can setup the verbosity of the library easily."),Rs.forEach(t),V=m(e),F=s(e,"P",{});var Nt=a(F);ee=n(Nt,"Currently the default verbosity of the library is "),re=s(Nt,"CODE",{});var As=a(re);Le=n(As,"WARNING"),As.forEach(t),x=n(Nt,"."),Nt.forEach(t),be=m(e),P=s(e,"P",{});var Is=a(P);Ce=n(Is,`To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity to the INFO level.`),Is.forEach(t),_e=m(e),v(S.$$.fragment,e),$e=m(e),y=s(e,"P",{});var T=a(y);j=n(T,"You can also use the environment variable "),te=s(T,"CODE",{});var Ns=a(te);oe=n(Ns,"TRANSFORMERS_VERBOSITY"),Ns.forEach(t),Fe=n(T,` to override the default verbosity. 
You can set it to one of the following: `),se=s(T,"CODE",{});var Os=a(se);z=n(Os,"debug"),Os.forEach(t),xe=n(T,", "),M=s(T,"CODE",{});var Ds=a(M);Pe=n(Ds,"info"),Ds.forEach(t),N=n(T,", "),k=s(T,"CODE",{});var Ts=a(k);O=n(Ts,"warning"),Ts.forEach(t),le=n(T,", "),Ee=s(T,"CODE",{});var Ss=a(Ee);gr=n(Ss,"error"),Ss.forEach(t),ke=n(T,", "),ye=s(T,"CODE",{});var Ls=a(ye);ne=n(Ls,"critical"),Ls.forEach(t),fr=n(T,". For example:"),T.forEach(t),Ge=m(e),v(B.$$.fragment,e),Ve=m(e),A=s(e,"P",{});var Q=a(A);mr=n(Q,"Additionally, some "),Rr=s(Q,"CODE",{});var Cs=a(Rr);qt=n(Cs,"warnings"),Cs.forEach(t),Ht=n(Q,` can be disabled by setting the environment variable `),Ar=s(Q,"CODE",{});var Fs=a(Ar);Ut=n(Fs,"TRANSFORMERS_NO_ADVISORY_WARNINGS"),Fs.forEach(t),Yt=n(Q," to a true value, like "),Ir=s(Q,"EM",{});var xs=a(Ir);jt=n(xs,"1"),xs.forEach(t),zt=n(Q,`. This will disable any warning that is logged using `),Nr=s(Q,"CODE",{});var Ps=a(Nr);Jt=n(Ps,"logger.warning_advice()"),Ps.forEach(t),Kt=n(Q,". For example:"),Q.forEach(t),lt=m(e),v(Me.$$.fragment,e),nt=m(e),dr=s(e,"P",{});var ks=a(dr);Qt=n(ks,"Here is an example of how to use the same logger as the library in your own module or script:"),ks.forEach(t),at=m(e),v(Be.$$.fragment,e),it=m(e),J=s(e,"P",{});var $r=a(J);Xt=n($r,`All the methods of this logging module are documented below, the main ones are `),cr=s($r,"A",{href:!0});var Gs=a(cr);Zt=n(Gs,"logging.get_verbosity()"),Gs.forEach(t),eo=n($r,` to get the current level of verbosity in the logger and `),pr=s($r,"A",{href:!0});var Vs=a(pr);ro=n(Vs,"logging.set_verbosity()"),Vs.forEach(t),to=n($r,` to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parenthesis) are:`),$r.forEach(t),gt=m(e),D=s(e,"UL",{});var X=a(D);we=s(X,"LI",{});var rt=a(we);Or=s(rt,"CODE",{});var Ms=a(Or);oo=n(Ms,"transformers.logging.CRITICAL"),Ms.forEach(t),so=n(rt," or "),Dr=s(rt,"CODE",{});var Bs=a(Dr);lo=n(Bs,"transformers.logging.FATAL"),Bs.forEach(t),no=n(rt,` (int value, 50): only report the most critical errors.`),rt.forEach(t),ao=m(X),ur=s(X,"LI",{});var bs=a(ur);Tr=s(bs,"CODE",{});var Ws=a(Tr);io=n(Ws,"transformers.logging.ERROR"),Ws.forEach(t),go=n(bs," (int value, 40): only report errors."),bs.forEach(t),fo=m(X),Re=s(X,"LI",{});var tt=a(Re);Sr=s(tt,"CODE",{});var qs=a(Sr);mo=n(qs,"transformers.logging.WARNING"),qs.forEach(t),co=n(tt," or "),Lr=s(tt,"CODE",{});var Hs=a(Lr);po=n(Hs,"transformers.logging.WARN"),Hs.forEach(t),uo=n(tt,` (int value, 30): only reports error and warnings. This the default level used by the library.`),tt.forEach(t),ho=m(X),hr=s(X,"LI",{});var _s=a(hr);Cr=s(_s,"CODE",{});var Us=a(Cr);vo=n(Us,"transformers.logging.INFO"),Us.forEach(t),bo=n(_s," (int value, 20): reports error, warnings and basic information."),_s.forEach(t),_o=m(X),vr=s(X,"LI",{});var $s=a(vr);Fr=s($s,"CODE",{});var Ys=a(Fr);$o=n(Ys,"transformers.logging.DEBUG"),Ys.forEach(t),Eo=n($s," (int value, 10): report all information."),$s.forEach(t),X.forEach(t),ft=m(e),G=s(e,"P",{});var De=a(G);yo=n(De,"By default, "),xr=s(De,"CODE",{});var js=a(xr);wo=n(js,"tqdm"),js.forEach(t),Ro=n(De," progress bars will be displayed during model download. 
"),br=s(De,"A",{href:!0});var zs=a(br);Ao=n(zs,"logging.disable_progress_bar()"),zs.forEach(t),Io=n(De," and "),_r=s(De,"A",{href:!0});var Js=a(_r);No=n(Js,"logging.enable_progress_bar()"),Js.forEach(t),Oo=n(De," can be used to suppress or unsuppress this behavior."),De.forEach(t),mt=m(e),ae=s(e,"H2",{class:!0});var Ot=a(ae);Ae=s(Ot,"A",{id:!0,class:!0,href:!0});var Ks=a(Ae);Pr=s(Ks,"SPAN",{});var Qs=a(Pr);v(We.$$.fragment,Qs),Qs.forEach(t),Ks.forEach(t),Do=m(Ot),kr=s(Ot,"SPAN",{});var Xs=a(kr);To=n(Xs,"Base setters"),Xs.forEach(t),Ot.forEach(t),dt=m(e),ie=s(e,"DIV",{class:!0});var Dt=a(ie);v(qe.$$.fragment,Dt),So=m(Dt),He=s(Dt,"P",{});var Tt=a(He);Lo=n(Tt,"Set the verbosity to the "),Gr=s(Tt,"CODE",{});var Zs=a(Gr);Co=n(Zs,"ERROR"),Zs.forEach(t),Fo=n(Tt," level."),Tt.forEach(t),Dt.forEach(t),ct=m(e),ge=s(e,"DIV",{class:!0});var St=a(ge);v(Ue.$$.fragment,St),xo=m(St),Ye=s(St,"P",{});var Lt=a(Ye);Po=n(Lt,"Set the verbosity to the "),Vr=s(Lt,"CODE",{});var el=a(Vr);ko=n(el,"WARNING"),el.forEach(t),Go=n(Lt," level."),Lt.forEach(t),St.forEach(t),pt=m(e),fe=s(e,"DIV",{class:!0});var Ct=a(fe);v(je.$$.fragment,Ct),Vo=m(Ct),ze=s(Ct,"P",{});var Ft=a(ze);Mo=n(Ft,"Set the verbosity to the "),Mr=s(Ft,"CODE",{});var rl=a(Mr);Bo=n(rl,"INFO"),rl.forEach(t),Wo=n(Ft," level."),Ft.forEach(t),Ct.forEach(t),ut=m(e),me=s(e,"DIV",{class:!0});var xt=a(me);v(Je.$$.fragment,xt),qo=m(xt),Ke=s(xt,"P",{});var Pt=a(Ke);Ho=n(Pt,"Set the verbosity to the "),Br=s(Pt,"CODE",{});var tl=a(Br);Uo=n(tl,"DEBUG"),tl.forEach(t),Yo=n(Pt," level."),Pt.forEach(t),xt.forEach(t),ht=m(e),de=s(e,"H2",{class:!0});var kt=a(de);Ie=s(kt,"A",{id:!0,class:!0,href:!0});var ol=a(Ie);Wr=s(ol,"SPAN",{});var sl=a(Wr);v(Qe.$$.fragment,sl),sl.forEach(t),ol.forEach(t),jo=m(kt),qr=s(kt,"SPAN",{});var ll=a(qr);zo=n(ll,"Other functions"),ll.forEach(t),kt.forEach(t),vt=m(e),W=s(e,"DIV",{class:!0});var Er=a(W);v(Xe.$$.fragment,Er),Jo=m(Er),Hr=s(Er,"P",{});var nl=a(Hr);Ko=n(nl,"Return the current level for the \u{1F917} Transformers\u2019s root logger as an int."),nl.forEach(t),Qo=m(Er),v(Ne.$$.fragment,Er),Er.forEach(t),bt=m(e),ce=s(e,"DIV",{class:!0});var Gt=a(ce);v(Ze.$$.fragment,Gt),Xo=m(Gt),Ur=s(Gt,"P",{});var al=a(Ur);Zo=n(al,"Set the verbosity level for the \u{1F917} Transformers\u2019s root logger."),al.forEach(t),Gt.forEach(t),_t=m(e),q=s(e,"DIV",{class:!0});var yr=a(q);v(er.$$.fragment,yr),es=m(yr),Yr=s(yr,"P",{});var il=a(Yr);rs=n(il,"Return a logger with the specified name."),il.forEach(t),ts=m(yr),jr=s(yr,"P",{});var gl=a(jr);os=n(gl,"This function is not supposed to be directly accessed unless you are writing a custom transformers module."),gl.forEach(t),yr.forEach(t),$t=m(e),pe=s(e,"DIV",{class:!0});var Vt=a(pe);v(rr.$$.fragment,Vt),ss=m(Vt),zr=s(Vt,"P",{});var fl=a(zr);ls=n(fl,"Enable the default handler of the HuggingFace Transformers\u2019s root logger."),fl.forEach(t),Vt.forEach(t),Et=m(e),ue=s(e,"DIV",{class:!0});var Mt=a(ue);v(tr.$$.fragment,Mt),ns=m(Mt),Jr=s(Mt,"P",{});var ml=a(Jr);as=n(ml,"Disable the default handler of the HuggingFace Transformers\u2019s root logger."),ml.forEach(t),Mt.forEach(t),yt=m(e),K=s(e,"DIV",{class:!0});var ot=a(K);v(or.$$.fragment,ot),is=m(ot),v(Oe.$$.fragment,ot),gs=n(ot,` All handlers currently bound to the root logger are affected by this method.`),ot.forEach(t),wt=m(e),H=s(e,"DIV",{class:!0});var wr=a(H);v(sr.$$.fragment,wr),fs=m(wr),Kr=s(wr,"P",{});var dl=a(Kr);ms=n(dl,"Resets the formatting for HuggingFace Transformers\u2019s loggers."),dl.forEach(t),ds=m(wr),Qr=s(wr,"P",{});var cl=a(Qr);cs=n(cl,"All 
handlers currently bound to the root logger are affected by this method."),cl.forEach(t),wr.forEach(t),Rt=m(e),he=s(e,"DIV",{class:!0});var Bt=a(he);v(lr.$$.fragment,Bt),ps=m(Bt),Xr=s(Bt,"P",{});var pl=a(Xr);us=n(pl,"Enable tqdm progress bar."),pl.forEach(t),Bt.forEach(t),At=m(e),ve=s(e,"DIV",{class:!0});var Wt=a(ve);v(nr.$$.fragment,Wt),hs=m(Wt),Zr=s(Wt,"P",{});var ul=a(Zr);vs=n(ul,"Disable tqdm progress bar."),ul.forEach(t),Wt.forEach(t),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(Nl)),c(d,"id","logging"),c(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(d,"href","#logging"),c(w,"class","relative group"),c(cr,"href","/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.get_verbosity"),c(pr,"href","/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.set_verbosity"),c(br,"href","/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.disable_progress_bar"),c(_r,"href","/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.enable_progress_bar"),c(Ae,"id","transformers.utils.logging.set_verbosity_error"),c(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ae,"href","#transformers.utils.logging.set_verbosity_error"),c(ae,"class","relative group"),c(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ie,"id","transformers.utils.logging.get_verbosity"),c(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ie,"href","#transformers.utils.logging.get_verbosity"),c(de,"class","relative group"),c(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,i){r(document.head,u),g(e,C,i),g(e,w,i),r(w,d),r(d,R),b(p,R,null),r(w,I),r(w,Z),r(Z,Te),g(e,U,i),g(e,Y,i),r(Y,Se),g(e,V,i),g(e,F,i),r(F,ee),r(F,re),r(re,Le),r(F,x),g(e,be,i),g(e,P,i),r(P,Ce),g(e,_e,i),b(S,e,i),g(e,$e,i),g(e,y,i),r(y,j),r(y,te),r(te,oe),r(y,Fe),r(y,se),r(se,z),r(y,xe),r(y,M),r(M,Pe),r(y,N),r(y,k),r(k,O),r(y,le),r(y,Ee),r(Ee,gr),r(y,ke),r(y,ye),r(ye,ne),r(y,fr),g(e,Ge,i),b(B,e,i),g(e,Ve,i),g(e,A,i),r(A,mr),r(A,Rr),r(Rr,qt),r(A,Ht),r(A,Ar),r(Ar,Ut),r(A,Yt),r(A,Ir),r(Ir,jt),r(A,zt),r(A,Nr),r(Nr,Jt),r(A,Kt),g(e,lt,i),b(Me,e,i),g(e,nt,i),g(e,dr,i),r(dr,Qt),g(e,at,i),b(Be,e,i),g(e,it,i),g(e,J,i),r(J,Xt),r(J,cr),r(cr,Zt),r(J,eo),r(J,pr),r(pr,ro),r(J,to),g(e,gt,i),g(e,D,i),r(D,we),r(we,Or),r(Or,oo),r(we,so),r(we,Dr),r(Dr,lo),r(we,no),r(D,ao),r(D,ur),r(ur,Tr),r(Tr,io),r(ur,go),r(D,fo),r(D,Re),r(Re,Sr),r(Sr,mo),r(Re,co),r(Re,Lr),r(Lr,po),r(Re,uo),r(D,ho),r(D,hr),r(hr,Cr),r(Cr,vo),r(hr,bo),r(D,_o),r(D,vr),r(vr,Fr),r(Fr,$o),r(vr,Eo),g(e,ft,i),g(e,G,i),r(G,yo),r(G,xr),r(xr,wo),r(G,Ro),r(G,br),r(br,Ao),r(G,Io),r(G,_r),r(_r,No),r(G,Oo),g(e,mt,i),g(e,ae,i),r(ae,Ae),r(Ae,Pr),b(We,Pr,null),r(ae,Do),r(ae,kr),r(kr,To),g(e,dt,i),g(e,ie,i),b(qe,ie,null),r(ie,So),r(ie,He),r(He,Lo),r(He,Gr),r(Gr,Co),r(He,Fo),g(e,ct,i),g(e,ge,i),b(Ue,ge,null),r(ge,xo),r(ge,Ye),r(Ye,Po),r(Ye,Vr),r(Vr,ko),r(Ye,Go),g(e,pt,i),g(e,fe,i),b(je,fe,null),r(fe,Vo),r(fe,ze),r(ze,Mo),r(ze,Mr),r(Mr,Bo),r(ze,Wo),g(e,ut,i),g(e,me,i),b(Je,me,null),r(me,qo),r(me,Ke),r(Ke,Ho),r(Ke,Br),r(Br,Uo),r(Ke,Yo),g(e,ht,i),g(e,de,i),r(de,Ie),r(Ie,Wr),b(Qe,Wr,null),r(de,jo),r(de,qr),r(qr,zo),g(e,vt,i),g(e,W,i),b(Xe,W,null),r(W,Jo),r(W,Hr),r(Hr,Ko),r(W,Qo),b(Ne,W,null),g(e,bt,i),g(e,ce,i),b(Ze,ce,null),r(ce,Xo),r(ce,Ur),r(Ur,Zo),g(e,_t,i),g(e,q,i),b(er,q,null),r(q,es),r(q,Yr),r(Yr,rs),r(q,ts),r(q,jr),r(jr,os),g(e,$t,i),g(e,pe,i),b(rr,pe,null),r(pe,ss),r(pe,zr),r(zr,ls),g(e,Et,i),g(e,ue,i),b(tr,ue,null),r(ue,ns),r(ue,Jr),r(Jr,as),g(e,yt,i),g(e,K,i),b(or,K,null),r(K,is),b(Oe,K,null),r(K,gs),g(e,wt,i),g(e,H,i),b(sr,H,null),r(H,fs),r(H,Kr),r(Kr,ms),r(H,ds),r(H,Qr),r(Qr,cs),g(e,Rt,i),g(e,he,i),b(lr,he,null),r(he,ps),r(he,Xr),r(Xr,us),g(e,At,i),g(e,ve,i),b(nr,ve,null),r(ve,hs),r(ve,Zr),r(Zr,vs),It=!0},p(e,[i]){const ar={};i&2&&(ar.$$scope={dirty:i,ctx:e}),Ne.$set(ar);const 
et={};i&2&&(et.$$scope={dirty:i,ctx:e}),Oe.$set(et)},i(e){It||(_(p.$$.fragment,e),_(S.$$.fragment,e),_(B.$$.fragment,e),_(Me.$$.fragment,e),_(Be.$$.fragment,e),_(We.$$.fragment,e),_(qe.$$.fragment,e),_(Ue.$$.fragment,e),_(je.$$.fragment,e),_(Je.$$.fragment,e),_(Qe.$$.fragment,e),_(Xe.$$.fragment,e),_(Ne.$$.fragment,e),_(Ze.$$.fragment,e),_(er.$$.fragment,e),_(rr.$$.fragment,e),_(tr.$$.fragment,e),_(or.$$.fragment,e),_(Oe.$$.fragment,e),_(sr.$$.fragment,e),_(lr.$$.fragment,e),_(nr.$$.fragment,e),It=!0)},o(e){$(p.$$.fragment,e),$(S.$$.fragment,e),$(B.$$.fragment,e),$(Me.$$.fragment,e),$(Be.$$.fragment,e),$(We.$$.fragment,e),$(qe.$$.fragment,e),$(Ue.$$.fragment,e),$(je.$$.fragment,e),$(Je.$$.fragment,e),$(Qe.$$.fragment,e),$(Xe.$$.fragment,e),$(Ne.$$.fragment,e),$(Ze.$$.fragment,e),$(er.$$.fragment,e),$(rr.$$.fragment,e),$(tr.$$.fragment,e),$(or.$$.fragment,e),$(Oe.$$.fragment,e),$(sr.$$.fragment,e),$(lr.$$.fragment,e),$(nr.$$.fragment,e),It=!1},d(e){t(u),e&&t(C),e&&t(w),E(p),e&&t(U),e&&t(Y),e&&t(V),e&&t(F),e&&t(be),e&&t(P),e&&t(_e),E(S,e),e&&t($e),e&&t(y),e&&t(Ge),E(B,e),e&&t(Ve),e&&t(A),e&&t(lt),E(Me,e),e&&t(nt),e&&t(dr),e&&t(at),E(Be,e),e&&t(it),e&&t(J),e&&t(gt),e&&t(D),e&&t(ft),e&&t(G),e&&t(mt),e&&t(ae),E(We),e&&t(dt),e&&t(ie),E(qe),e&&t(ct),e&&t(ge),E(Ue),e&&t(pt),e&&t(fe),E(je),e&&t(ut),e&&t(me),E(Je),e&&t(ht),e&&t(de),E(Qe),e&&t(vt),e&&t(W),E(Xe),E(Ne),e&&t(bt),e&&t(ce),E(Ze),e&&t(_t),e&&t(q),E(er),e&&t($t),e&&t(pe),E(rr),e&&t(Et),e&&t(ue),E(tr),e&&t(yt),e&&t(K),E(or),E(Oe),e&&t(wt),e&&t(H),E(sr),e&&t(Rt),e&&t(he),E(lr),e&&t(At),e&&t(ve),E(nr)}}}const Nl={local:"logging",sections:[{local:"transformers.utils.logging.set_verbosity_error",title:"Base setters"},{local:"transformers.utils.logging.get_verbosity",title:"Other functions"}],title:"Logging"};function Ol(ir){return $l(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class xl extends hl{constructor(u){super();vl(this,u,Ol,Il,bl,{})}}export{xl as default,Nl as metadata};
20
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/pipelines.mdx-hf-doc-builder.js
import{S as VD,i as HD,s as WD,e as o,k as l,w as h,t as a,M as ZD,c as r,d as n,m as d,a as s,x as u,h as i,b as c,G as e,g as m,y as g,q as _,o as b,B as v,v as BD,L as un}from"../../chunks/vendor-hf-doc-builder.js";import{T as RD}from"../../chunks/Tip-hf-doc-builder.js";import{D as P}from"../../chunks/Docstring-hf-doc-builder.js";import{C as j}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as C}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as hn}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function YD(z){let T,$,y,w,x;return w=new j({props:{code:`from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer # Sentiment analysis pipeline pipeline("sentiment-analysis") # Question answering pipeline, specifying the checkpoint identifier pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased") # Named entity recognition pipeline, passing in a specific model and tokenizer model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") pipeline("ner", model=model, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline, AutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Sentiment analysis pipeline</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Question answering pipeline, specifying the checkpoint identifier</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;question-answering&quot;</span>, model=<span class="hljs-string">&quot;distilbert-base-cased-distilled-squad&quot;</span>, tokenizer=<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Named entity recognition pipeline, passing in a specific model and tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;ner&quot;</span>, model=model, tokenizer=tokenizer)`}}),{c(){T=o("p"),$=a("Examples:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Examples:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function XD(z){let T,$,y,w,x;return{c(){T=o("p"),$=a(`However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.`),y=l(),w=o("p"),x=a("Example where it\u2019s mostly a speedup:")},l(f){T=r(f,"P",{});var k=s(T);$=i(k,`However, this is not automatically a win for performance. 
It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.`),k.forEach(n),y=d(f),w=r(f,"P",{});var Ee=s(w);x=i(Ee,"Example where it\u2019s mostly a speedup:"),Ee.forEach(n)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),m(f,w,k),e(w,x)},d(f){f&&n(T),f&&n(y),f&&n(w)}}}function KD(z){let T,$,y,w,x;return w=new j({props:{code:`conversation = Conversation("Going to the movies tonight - any suggestions?") # Steps usually performed by the model when generating a response: # 1. Mark the user input as processed (moved to the history) conversation.mark_processed() # 2. Append a mode response conversation.append_response("The Big lebowski.") conversation.add_user_input("Is it good?")`,highlighted:`conversation = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) <span class="hljs-comment"># Steps usually performed by the model when generating a response:</span> <span class="hljs-comment"># 1. Mark the user input as processed (moved to the history)</span> conversation.mark_processed() <span class="hljs-comment"># 2. Append a mode response</span> conversation.append_response(<span class="hljs-string">&quot;The Big lebowski.&quot;</span>) conversation.add_user_input(<span class="hljs-string">&quot;Is it good?&quot;</span>)`}}),{c(){T=o("p"),$=a("Usage:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Usage:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function JD(z){let T,$,y,w,x;return w=new j({props:{code:`conversational_pipeline = pipeline("conversational") conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input("Is it an action movie?") conversation_2.add_user_input("What is the genre of this book?") conversational_pipeline([conversation_1, conversation_2])`,highlighted:`conversational_pipeline = pipeline(<span class="hljs-string">&quot;conversational&quot;</span>) conversation_1 = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) conversation_2 = Conversation(<span class="hljs-string">&quot;What&#x27;s the last book you have read?&quot;</span>) conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input(<span class="hljs-string">&quot;Is it an action movie?&quot;</span>) conversation_2.add_user_input(<span class="hljs-string">&quot;What is the genre of this book?&quot;</span>) conversational_pipeline([conversation_1, conversation_2])`}}),{c(){T=o("p"),$=a("Usage:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Usage:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function e7(z){let T,$,y,w,x;return{c(){T=o("p"),$=a(`This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See `),y=o("a"),w=a("discussion"),x=a(")."),this.h()},l(f){T=r(f,"P",{});var k=s(T);$=i(k,`This pipeline only works for inputs with exactly one token masked. 
Experimental: We added support for multiple masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See `),y=r(k,"A",{href:!0,rel:!0});var Ee=s(y);w=i(Ee,"discussion"),Ee.forEach(n),x=i(k,")."),k.forEach(n),this.h()},h(){c(y,"href","https://github.com/huggingface/transformers/pull/10222"),c(y,"rel","nofollow")},m(f,k){m(f,T,k),e(T,$),e(T,y),e(y,w),e(T,x)},d(f){f&&n(T)}}}function t7(z){let T,$,y,w,x;return w=new j({props:{code:`# use bart in pytorch summarizer = pipeline("summarization") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) # use t5 in tf summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)`,highlighted:`<span class="hljs-comment"># use bart in pytorch</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>) <span class="hljs-comment"># use t5 in tf</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>, model=<span class="hljs-string">&quot;t5-base&quot;</span>, tokenizer=<span class="hljs-string">&quot;t5-base&quot;</span>, framework=<span class="hljs-string">&quot;tf&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>)`}}),{c(){T=o("p"),$=a("Usage:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Usage:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function n7(z){let T,$,y,w,x;return w=new j({props:{code:`data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }`,highlighted:`data = { <span class="hljs-string">&quot;actors&quot;</span>: [<span class="hljs-string">&quot;brad pitt&quot;</span>, <span class="hljs-string">&quot;leonardo di caprio&quot;</span>, <span class="hljs-string">&quot;george clooney&quot;</span>], <span class="hljs-string">&quot;age&quot;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-string">&quot;number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>], <span class="hljs-string">&quot;date of birth&quot;</span>: [<span class="hljs-string">&quot;7 february 1967&quot;</span>, <span class="hljs-string">&quot;10 june 1996&quot;</span>, <span class="hljs-string">&quot;28 november 1967&quot;</span>], }`}}),{c(){T=o("p"),$=a("Example:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Example:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function o7(z){let T,$,y,w,x;return w=new j({props:{code:`import pandas as pd table = 
pd.DataFrame.from_dict(data)`,highlighted:`<span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd table = pd.DataFrame.from_dict(data)`}}),{c(){T=o("p"),$=a("Example:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Example:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function r7(z){let T,$,y,w,x;return w=new j({props:{code:`text2text_generator = pipeline("text2text-generation") text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")`,highlighted:`text2text_generator = pipeline(<span class="hljs-string">&quot;text2text-generation&quot;</span>) text2text_generator(<span class="hljs-string">&quot;question: What is 42 ? context: 42 is the answer to life, the universe and everything&quot;</span>)`}}),{c(){T=o("p"),$=a("Usage:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Usage:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function s7(z){let T,$,y,w,x;return w=new j({props:{code:`en_fr_translator = pipeline("translation_en_to_fr") en_fr_translator("How old are you?")`,highlighted:`en_fr_translator = pipeline(<span class="hljs-string">&quot;translation_en_to_fr&quot;</span>) en_fr_translator(<span class="hljs-string">&quot;How old are you?&quot;</span>)`}}),{c(){T=o("p"),$=a("Usage:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Usage:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function a7(z){let T,$,y,w,x;return w=new j({props:{code:`# Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...)`,highlighted:`<span class="hljs-comment"># Explicitly ask for tensor allocation on CUDA device :0</span> pipe = pipeline(..., device=<span class="hljs-number">0</span>) <span class="hljs-keyword">with</span> pipe.device_placement(): <span class="hljs-comment"># Every framework specific tensor allocation will be done on the request device</span> output = pipe(...)`}}),{c(){T=o("p"),$=a("Examples:"),y=l(),h(w.$$.fragment)},l(f){T=r(f,"P",{});var k=s(T);$=i(k,"Examples:"),k.forEach(n),y=d(f),u(w.$$.fragment,f)},m(f,k){m(f,T,k),e(T,$),m(f,y,k),g(w,f,k),x=!0},p:un,i(f){x||(_(w.$$.fragment,f),x=!0)},o(f){b(w.$$.fragment,f),x=!1},d(f){f&&n(T),f&&n(y),v(w,f)}}}function i7(z){let 
T,$,y,w,x,f,k,Ee,Vb,Qh,gn,Hb,Ci,Wb,Zb,Rh,Di,Bb,Vh,_n,Fd,rr,Yb,zi,Xb,Kb,Jb,sr,Ld,ev,tv,E,Od,Ii,nv,ov,Ud,ji,rv,sv,Nd,Si,av,iv,Gd,Mi,lv,dv,Qd,Fi,cv,pv,Rd,Li,mv,fv,Vd,Oi,hv,uv,Hd,Ui,gv,_v,Wd,Ni,bv,vv,Zd,Gi,wv,Tv,Bd,Qi,kv,Pv,Yd,Ri,yv,xv,Xd,Vi,$v,Ev,Kd,Hi,qv,Av,Jd,Wi,Cv,Dv,ec,Zi,zv,Iv,tc,Bi,jv,Sv,nc,Yi,Mv,Fv,oc,Xi,Lv,Ov,rc,Ki,Uv,Nv,sc,Ji,Gv,Qv,ac,el,Rv,Hh,st,bn,ic,ar,Vv,lc,Hv,Wh,vn,Wv,dc,Zv,Bv,Zh,tl,Yv,Bh,ir,Yh,wn,Xv,lr,Kv,Jv,Xh,dr,Kh,Tn,ew,cc,tw,nw,Jh,cr,eu,kn,ow,pc,rw,sw,tu,pr,nu,nl,aw,ou,mr,ru,X,fr,iw,hr,lw,ol,dw,cw,pw,mc,mw,fw,at,ur,hw,rl,uw,gw,_w,gr,bw,sl,vw,ww,Tw,fc,kw,Pw,Pn,su,it,yn,hc,_r,yw,uc,xw,au,je,$w,gc,Ew,qw,_c,Aw,Cw,iu,br,lu,xn,du,vr,cu,wr,pu,al,Dw,mu,Tr,fu,$n,zw,bc,Iw,jw,hu,kr,uu,il,Sw,gu,ll,Mw,_u,ce,vc,wc,Tc,Fw,Lw,kc,Pc,Ow,Uw,yc,xc,Nw,Gw,Pr,$c,Qw,Rw,lt,Ec,Vw,Hw,qc,Ww,Zw,Ac,Bw,Yw,Cc,Dc,Xw,bu,dt,En,zc,yr,Kw,Ic,Jw,vu,qe,jc,e1,t1,Sc,n1,o1,Mc,r1,s1,wu,Se,a1,Fc,i1,l1,Lc,d1,c1,Tu,xr,ku,dl,p1,Pu,$r,yu,cl,m1,xu,qn,f1,Oc,h1,u1,$u,ct,An,Uc,Er,g1,Nc,_1,Eu,pl,b1,qu,Cn,v1,Gc,w1,T1,Au,ml,k1,Cu,fl,Qc,P1,Du,qr,zu,hl,y1,Iu,pt,Dn,Rc,Ar,x1,Vc,$1,ju,ul,gl,E1,Su,mt,zn,Hc,Cr,q1,Wc,A1,Mu,ft,In,Zc,Dr,C1,Bc,D1,Fu,K,zr,z1,Ir,I1,Yc,j1,S1,M1,ht,F1,_l,L1,O1,Xc,U1,N1,G1,jr,Q1,Sr,R1,V1,H1,jn,Mr,W1,Fr,Z1,bl,B1,Y1,Lu,ut,Sn,Kc,Lr,X1,Jc,K1,Ou,ge,Or,J1,ep,e2,t2,tp,n2,o2,Mn,Ur,r2,Nr,s2,vl,a2,i2,Uu,gt,Fn,np,Gr,l2,op,d2,Nu,M,Qr,c2,Ae,p2,wl,m2,f2,Tl,h2,u2,rp,g2,_2,b2,Ln,v2,On,Rr,w2,Vr,T2,sp,k2,P2,y2,Un,Hr,x2,ap,$2,E2,Me,Wr,q2,ip,A2,C2,_e,D2,lp,z2,I2,dp,j2,S2,cp,M2,F2,pp,L2,O2,U2,Nn,Zr,N2,Ce,G2,mp,Q2,R2,fp,V2,H2,hp,W2,Z2,Gu,G,Br,B2,up,Y2,X2,_t,K2,kl,J2,eT,gp,tT,nT,oT,be,rT,_p,sT,aT,bp,iT,lT,vp,dT,cT,Yr,pT,mT,fT,Gn,hT,Qn,Xr,uT,wp,gT,Qu,bt,Rn,Tp,Kr,_T,kp,bT,Ru,J,Jr,vT,es,wT,Pp,TT,kT,PT,vt,yT,Pl,xT,$T,yp,ET,qT,AT,ts,CT,ns,DT,zT,IT,we,os,jT,rs,ST,xp,MT,FT,LT,$p,OT,UT,De,Ep,qp,NT,GT,Ap,Cp,QT,RT,Dp,zp,VT,HT,Ip,jp,WT,Vu,wt,Vn,Sp,ss,ZT,Mp,BT,Hu,ee,as,YT,Fp,XT,KT,Tt,JT,yl,ek,tk,Lp,nk,ok,rk,is,sk,ls,ak,ik,lk,Hn,ds,dk,Op,ck,Wu,kt,Wn,Up,cs,pk,Np,mk,Zu,Q,ps,fk,Pt,hk,Gp,uk,gk,xl,_k,bk,vk,yt,wk,$l,Tk,kk,Qp,Pk,yk,xk,ms,$k,fs,Ek,qk,Ak,Zn,Ck,Bn,hs,Dk,Rp,zk,Bu,xt,Yn,Vp,us,Ik,Hp,jk,Yu,te,gs,Sk,_s,Mk,Wp,Fk,Lk,Ok,$t,Uk,El,Nk,Gk,Zp,Qk,Rk,Vk,bs,Hk,vs,Wk,Zk,Bk,Xn,ws,Yk,Bp,Xk,Xu,Et,Kn,Yp,Ts,Kk,Xp,Jk,Ku,ne,ks,eP,Ps,tP,Kp,nP,oP,rP,qt,sP,ql,aP,iP,Jp,lP,dP,cP,ys,pP,xs,mP,fP,hP,Jn,$s,uP,em,gP,Ju,At,eo,tm,Es,_P,nm,bP,eg,oe,qs,vP,As,wP,om,TP,kP,PP,rm,yP,xP,Cs,$P,Ds,EP,qP,AP,to,zs,CP,sm,DP,tg,Ct,no,am,Is,zP,im,IP,ng,S,js,jP,Dt,SP,lm,MP,FP,Al,LP,OP,UP,zt,NP,Cl,GP,QP,dm,RP,VP,HP,Ss,WP,Ms,ZP,BP,YP,Fe,Fs,XP,cm,KP,JP,pm,ey,ty,oo,Ls,ny,mm,oy,ry,ro,Os,sy,fm,ay,iy,so,Us,ly,hm,dy,og,ao,cy,Dl,py,my,rg,It,io,um,Ns,fy,gm,hy,sg,re,Gs,uy,Qs,gy,_m,_y,by,vy,jt,wy,zl,Ty,ky,bm,Py,yy,xy,Rs,$y,Vs,Ey,qy,Ay,lo,Hs,Cy,vm,Dy,ag,St,co,wm,Ws,zy,Tm,Iy,ig,F,Zs,jy,Mt,Sy,km,My,Fy,Il,Ly,Oy,Uy,Ft,Ny,jl,Gy,Qy,Pm,Ry,Vy,Hy,Bs,Wy,Ys,Zy,By,Yy,po,Xs,Xy,ym,Ky,Jy,Le,Ks,e0,Lt,t0,xm,n0,o0,$m,r0,s0,a0,Em,i0,l0,mo,Js,d0,qm,c0,lg,Ot,fo,Am,ea,p0,Cm,m0,dg,R,ta,f0,Dm,h0,u0,Ut,g0,Sl,_0,b0,zm,v0,w0,T0,L,k0,Im,P0,y0,jm,x0,$0,Sm,E0,q0,Mm,A0,C0,Fm,D0,z0,Lm,I0,j0,na,S0,M0,F0,ho,L0,uo,oa,O0,Om,U0,cg,Nt,go,Um,ra,N0,Nm,G0,pg,se,sa,Q0,aa,R0,Gm,V0,H0,W0,Gt,Z0,Ml,B0,Y0,Qm,X0,K0,J0,ia,e4,la,t4,n4,o4,U,da,r4,Rm,s4,a4,V,Vm,Hm,i4,l4,Wm,Zm,d4,c4,Bm,Ym,p4,m4,Xm,Km,f4,h4,Jm,ef,u4,g4,tf,nf,_4,b4,of,rf,v4,w4,ca,T4,sf,k4,P4,y4,_o,x4,af,$4,E4,bo,mg,Qt,vo,lf,pa,q4,df,A4,fg,H,ma,C4,Rt,D4,cf,z4,I4,Fl,j4,S4,M4,Vt,F4,Ll,L4,O4,pf,U4,N4,G4,fa,Q4,mf,R4,V4,H4,ha,W4,ua,Z4,B4,Y4,wo,ga,X4,ff,K4,hg,Ht,To,hf,_a,J4,uf,ex,ug,ae,ba,tx,va,nx,gf,ox,rx,sx,Wt,ax,Ol,ix,lx,_f,dx,cx,px,wa,mx,Ta,fx,hx,ux,ko,ka,gx,bf,_x,gg,Zt,Po,vf,Pa,bx,wf,vx,_g,O,y
a,wx,Tf,Tx,kx,Bt,Px,Ul,yx,xx,kf,$x,Ex,qx,xa,Ax,$a,Cx,Dx,zx,yo,Ix,xo,Ea,jx,Pf,Sx,Mx,$o,qa,Fx,yf,Lx,bg,Yt,Eo,xf,Aa,Ox,$f,Ux,vg,I,Ca,Nx,Xt,Gx,Ef,Qx,Rx,Nl,Vx,Hx,Wx,Kt,Zx,Gl,Bx,Yx,qf,Xx,Kx,Jx,Da,e$,za,t$,n$,o$,qo,Ia,r$,Af,s$,a$,Oe,ja,i$,Cf,l$,d$,Df,c$,p$,Ao,Sa,m$,zf,f$,h$,Co,Ma,u$,If,g$,_$,Do,Fa,b$,jf,v$,wg,Jt,zo,Sf,La,w$,Mf,T$,Tg,W,Oa,k$,Ff,P$,y$,en,x$,Ql,$$,E$,Lf,q$,A$,C$,Ua,D$,Na,z$,I$,j$,Io,S$,jo,Ga,M$,Of,F$,kg,tn,So,Uf,Qa,L$,Nf,O$,Pg,ie,Ra,U$,Va,N$,Gf,G$,Q$,R$,nn,V$,Rl,H$,W$,Qf,Z$,B$,Y$,Ha,X$,Wa,K$,J$,e9,Ue,Za,t9,Rf,n9,o9,ze,Vf,Hf,r9,s9,Wf,Zf,a9,i9,Bf,Yf,l9,d9,Xf,Kf,c9,yg,on,Mo,Jf,Ba,p9,eh,m9,xg,Z,Ya,f9,Xa,h9,th,u9,g9,_9,Ie,b9,nh,v9,w9,oh,T9,k9,rh,P9,y9,x9,rn,$9,Vl,E9,q9,sh,A9,C9,D9,Ka,z9,Ja,I9,j9,S9,Fo,ei,M9,ti,F9,Hl,L9,O9,$g,sn,Lo,ah,ni,U9,ih,N9,Eg,le,oi,G9,an,Q9,lh,R9,V9,dh,H9,W9,Z9,ln,B9,Wl,Y9,X9,ch,K9,J9,eE,ri,tE,si,nE,oE,rE,Oo,ai,sE,ph,aE,qg,dn,Uo,mh,ii,iE,fh,lE,Ag,de,li,dE,cn,cE,hh,pE,mE,uh,fE,hE,uE,pn,gE,Zl,_E,bE,gh,vE,wE,TE,di,kE,ci,PE,yE,xE,No,pi,$E,_h,EE,Cg,mn,Go,bh,mi,qE,Bl,AE,vh,CE,Dg,A,fi,DE,wh,zE,IE,Th,jE,SE,kh,ME,FE,Ph,LE,OE,ve,UE,Yl,NE,GE,yh,QE,RE,xh,VE,HE,$h,WE,ZE,BE,Qo,hi,YE,Eh,XE,KE,Ne,ui,JE,qh,e5,t5,Ro,n5,Vo,gi,o5,Ah,r5,s5,Ho,_i,a5,bi,i5,Ch,l5,d5,c5,Wo,vi,p5,wi,m5,Dh,f5,h5,u5,Zo,Ti,g5,fn,_5,zh,b5,v5,Ih,w5,T5,k5,Bo,ki,P5,jh,y5,x5,Yo,Pi,$5,yi,E5,Sh,q5,A5,zg;return f=new C({}),ar=new C({}),ir=new j({props:{code:`pipe = pipeline("text-classification") pipe("This restaurant is awesome")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]`}}),dr=new j({props:{code:`pipe = pipeline(model="roberta-large-mnli") pipe("This restaurant is awesome")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(model=<span class="hljs-string">&quot;roberta-large-mnli&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]`}}),cr=new j({props:{code:`pipe = pipeline("text-classification") pipe(["This restaurant is awesome", "This restaurant is aweful"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe([<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>, <span class="hljs-string">&quot;This restaurant is aweful&quot;</span>]) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;NEGATIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9996669292449951</span>}]`}}),pr=new j({props:{code:`import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = 
pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item # as we're not interested in the *target* part of the dataset. for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # ....`,highlighted:`<span class="hljs-keyword">import</span> datasets <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, device=<span class="hljs-number">0</span>) dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>) <span class="hljs-comment"># KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item</span> <span class="hljs-comment"># as we&#x27;re not interested in the *target* part of the dataset.</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(KeyDataset(dataset, <span class="hljs-string">&quot;file&quot;</span>))): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span>`}}),mr=new j({props:{code:`from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # This could come from a dataset, a database, a queue or HTTP request # in a server # Caveat: because this is iterative, you cannot use \`num_workers > 1\` variable # to use multiple threads to preprocess data. You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # ....`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">data</span>(): <span class="hljs-keyword">while</span> <span class="hljs-literal">True</span>: <span class="hljs-comment"># This could come from a dataset, a database, a queue or HTTP request</span> <span class="hljs-comment"># in a server</span> <span class="hljs-comment"># Caveat: because this is iterative, you cannot use \`num_workers &gt; 1\` variable</span> <span class="hljs-comment"># to use multiple threads to preprocess data. 
You can still have 1 thread that</span> <span class="hljs-comment"># does the preprocessing while the main runs the big inference</span> <span class="hljs-keyword">yield</span> <span class="hljs-string">&quot;This is a test&quot;</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(data()): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span>`}}),fr=new P({props:{name:"transformers.pipeline",anchor:"transformers.pipeline",parameters:[{name:"task",val:": str = None"},{name:"model",val:": typing.Optional = None"},{name:"config",val:": typing.Union[str, transformers.configuration_utils.PretrainedConfig, NoneType] = None"},{name:"tokenizer",val:": typing.Union[str, transformers.tokenization_utils.PreTrainedTokenizer, transformers.tokenization_utils_fast.PreTrainedTokenizerFast, NoneType] = None"},{name:"feature_extractor",val:": typing.Union[str, ForwardRef('SequenceFeatureExtractor'), NoneType] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"revision",val:": typing.Optional[str] = None"},{name:"use_fast",val:": bool = True"},{name:"use_auth_token",val:": typing.Union[str, bool, NoneType] = None"},{name:"device",val:": typing.Union[int, str, ForwardRef('torch.device'), NoneType] = None"},{name:"device_map",val:" = None"},{name:"torch_dtype",val:" = None"},{name:"trust_remote_code",val:": typing.Optional[bool] = None"},{name:"model_kwargs",val:": typing.Dict[str, typing.Any] = None"},{name:"pipeline_class",val:": typing.Optional[typing.Any] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.pipeline.task",description:`<strong>task</strong> (<code>str</code>) &#x2014; The task defining which pipeline will be returned. 
Currently accepted tasks are:</p> <ul> <li><code>&quot;audio-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a>.</li> <li><code>&quot;automatic-speech-recognition&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a>.</li> <li><code>&quot;conversational&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>.</li> <li><code>&quot;feature-extraction&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a>.</li> <li><code>&quot;fill-mask&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a>:.</li> <li><code>&quot;image-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a>.</li> <li><code>&quot;question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a>.</li> <li><code>&quot;table-question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a>.</li> <li><code>&quot;text2text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a>.</li> <li><code>&quot;text-classification&quot;</code> (alias <code>&quot;sentiment-analysis&quot;</code> available): will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a>.</li> <li><code>&quot;text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a>:.</li> <li><code>&quot;token-classification&quot;</code> (alias <code>&quot;ner&quot;</code> available): will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a>.</li> <li><code>&quot;translation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;translation_xx_to_yy&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;summarization&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a>.</li> <li><code>&quot;zero-shot-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a>.</li> </ul>`,name:"task"},{anchor:"transformers.pipeline.model",description:`<strong>model</strong> (<code>str</code> or <a 
href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, <em>optional</em>) &#x2014; The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> (for PyTorch) or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> (for TensorFlow).</p> <p>If not provided, the default for the <code>task</code> will be loaded.`,name:"model"},{anchor:"transformers.pipeline.config",description:`<strong>config</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>.</p> <p>If not provided, the default configuration file for the requested model will be used. That means that if <code>model</code> is given, its default configuration will be used. However, if <code>model</code> is not supplied, this <code>task</code>&#x2019;s default model&#x2019;s config is used instead.`,name:"config"},{anchor:"transformers.pipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>, <em>optional</em>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.</p> <p>If not provided, the default tokenizer for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default tokenizer for <code>config</code> is loaded (if it is a string). However, if <code>config</code> is also not given or not a string, then the default tokenizer for the given <code>task</code> will be loaded.`,name:"tokenizer"},{anchor:"transformers.pipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<code>str</code> or <code>PreTrainedFeatureExtractor</code>, <em>optional</em>) &#x2014; The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from <code>PreTrainedFeatureExtractor</code>.</p> <p>Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed.</p> <p>If not provided, the default feature extractor for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default feature extractor for <code>config</code> is loaded (if it is a string). 
However, if <code>config</code> is also not given or not a string, then the default feature extractor for the given <code>task</code> will be loaded.`,name:"feature_extractor"},{anchor:"transformers.pipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.pipeline.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; When passing a task name or a string model identifier: The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.pipeline.use_fast",description:`<strong>use_fast</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a Fast tokenizer if possible (a <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>).`,name:"use_fast"},{anchor:"transformers.pipeline.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.pipeline.device",description:`<strong>device</strong> (<code>int</code> or <code>str</code> or <code>torch.device</code>) &#x2014; Defines the device (<em>e.g.</em>, <code>&quot;cpu&quot;</code>, <code>&quot;cuda:1&quot;</code>, <code>&quot;mps&quot;</code>, or a GPU ordinal rank like <code>1</code>) on which this pipeline will be allocated.`,name:"device"},{anchor:"transformers.pipeline.device_map",description:`<strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]</code>, <em>optional</em>) &#x2014; Sent directly as <code>model_kwargs</code> (just a simpler shortcut). When <code>accelerate</code> library is present, set <code>device_map=&quot;auto&quot;</code> to compute the most optimized <code>device_map</code> automatically. 
<a href="https://huggingface.co/docs/accelerate/main/en/big_modeling#accelerate.cpu_offload" rel="nofollow">More information</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Do not use <code>device_map</code> AND <code>device</code> at the same time as they will conflict</p> </div>`,name:"device_map"},{anchor:"transformers.pipeline.torch_dtype",description:`<strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014; Sent directly as <code>model_kwargs</code> (just a simpler shortcut) to use the available precision for this model (<code>torch.float16</code>, <code>torch.bfloat16</code>, &#x2026; or <code>&quot;auto&quot;</code>).`,name:"torch_dtype"},{anchor:"transformers.pipeline.trust_remote_code",description:`<strong>trust_remote_code</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. This option should only be set to <code>True</code> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs &#x2014; Additional dictionary of keyword arguments passed along to the model&#x2019;s <code>from_pretrained(..., **model_kwargs)</code> function. kwargs &#x2014; Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values).`,name:"trust_remote_code"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/__init__.py#L450",returnDescription:` <p>A suitable pipeline for the task.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></p> `}}),Pn=new hn({props:{anchor:"transformers.pipeline.example",$$slots:{default:[YD]},$$scope:{ctx:z}}}),_r=new C({}),br=new j({props:{code:`from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">import</span> datasets dataset = datasets.load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>, name=<span class="hljs-string">&quot;plain_text&quot;</span>, split=<span class="hljs-string">&quot;unsupervised&quot;</span>) pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(KeyDataset(dataset, <span class="hljs-string">&quot;text&quot;</span>), batch_size=<span class="hljs-number">8</span>, truncation=<span 
class="hljs-string">&quot;only_first&quot;</span>): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># [{&#x27;label&#x27;: &#x27;POSITIVE&#x27;, &#x27;score&#x27;: 0.9998743534088135}]</span> <span class="hljs-comment"># Exactly the same output as before, but the content are passed</span> <span class="hljs-comment"># as batches to the model</span>`}}),xn=new RD({props:{warning:!0,$$slots:{default:[XD]},$$scope:{ctx:z}}}),vr=new j({props:{code:`from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> Dataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> dataset = MyDataset() <span class="hljs-keyword">for</span> batch_size <span class="hljs-keyword">in</span> [<span class="hljs-number">1</span>, <span class="hljs-number">8</span>, <span class="hljs-number">64</span>, <span class="hljs-number">256</span>]: <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;-&quot;</span> * <span class="hljs-number">30</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Streaming batch_size=<span class="hljs-subst">{batch_size}</span>&quot;</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=batch_size), total=<span class="hljs-built_in">len</span>(dataset)): <span class="hljs-keyword">pass</span>`}}),wr=new j({props:{code:`# On GTX 970 ------------------------------ Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU)`,highlighted:`<span class="hljs-section"># On GTX 970 ------------------------------</span> Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:26&lt;00:00, 187.52it/s] <span class="hljs-code">------------------------------ Streaming batch_size=8 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:04&lt;00:00, 1205.95it/s] ------------------------------</span> Streaming batch<span class="hljs-emphasis">_size=64 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:02&lt;00:00, 2478.24it/s] ------------------------------ Streaming batch_</span>size=256 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:01&lt;00:00, 2554.43it/s] (diminishing returns, saturated the GPU)`}}),Tr=new j({props:{code:`class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">if</span> i % <span class="hljs-number">64</span> == <span class="hljs-number">0</span>: n = <span class="hljs-number">100</span> <span class="hljs-keyword">else</span>: n = <span class="hljs-number">1</span> <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> * n`}}),kr=new j({props:{code:`------------------------------ Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. 
Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch)`,highlighted:`<span class="hljs-comment">------------------------------</span> Streaming no batching <span class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">05</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">183.69</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">8</span> <span class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">03</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">265.74</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">64</span> <span class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">26</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">37.80</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">256</span> <span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span><span class="hljs-meta">&lt;?</span>, ?<span class="hljs-keyword">it</span>/s] Traceback (most recent call <span class="hljs-keyword">last</span>): File <span class="hljs-string">&quot;/home/nicolas/src/transformers/test.py&quot;</span>, <span class="hljs-built_in">line</span> <span class="hljs-number">42</span>, <span class="hljs-keyword">in</span> &lt;module&gt; <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, 
batch_size=<span class="hljs-number">256</span>), total=<span class="hljs-built_in">len</span>(dataset)): .... q = q / math.<span class="hljs-built_in">sqrt</span>(dim_per_head) <span class="hljs-comment"># (bs, n_heads, q_length, dim_per_head)</span> RuntimeError: CUDA out <span class="hljs-keyword">of</span> memory. Tried <span class="hljs-built_in">to</span> allocate <span class="hljs-number">376.00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">3.95</span> GiB total capacity; <span class="hljs-number">1.72</span> GiB already allocated; <span class="hljs-number">354.88</span> MiB free; <span class="hljs-number">2.46</span> GiB reserved <span class="hljs-keyword">in</span> total <span class="hljs-keyword">by</span> PyTorch)`}}),yr=new C({}),xr=new j({props:{code:`preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)`,highlighted:`preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)`}}),$r=new j({props:{code:`all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs)`,highlighted:`all_model_outputs = [] <span class="hljs-keyword">for</span> preprocessed <span class="hljs-keyword">in</span> pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs)`}}),Er=new C({}),qr=new j({props:{code:`class MyPipeline(TextClassificationPipeline): def postprocess(): # Your code goes here scores = scores * 100 # And here my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline)`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyPipeline</span>(<span class="hljs-title class_ inherited__">TextClassificationPipeline</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(): <span class="hljs-comment"># Your code goes here</span> scores = scores * <span class="hljs-number">100</span> <span class="hljs-comment"># And here</span> my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) <span class="hljs-comment"># or if you use *pipeline* function, then:</span> my_pipeline = pipeline(model=<span class="hljs-string">&quot;xxxx&quot;</span>, pipeline_class=MyPipeline)`}}),Ar=new C({}),Cr=new C({}),Dr=new C({}),zr=new P({props:{name:"class transformers.AudioClassificationPipeline",anchor:"transformers.AudioClassificationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.AudioClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.AudioClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.AudioClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.AudioClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.AudioClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.AudioClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.AudioClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.AudioClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.AudioClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.AudioClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/audio_classification.py#L66"}}),Mr=new P({props:{name:"__call__",anchor:"transformers.AudioClassificationPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[numpy.ndarray, bytes, str]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.AudioClassificationPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code>) &#x2014; The inputs is either a raw waveform (<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) at the correct sampling rate (no further check will be done) or a <code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system. If <em>inputs</em> is <code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.`,name:"inputs"},{anchor:"transformers.AudioClassificationPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is <code>None</code> or higher than the number of labels available in the model configuration, it will default to the number of labels.`,name:"top_k"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/audio_classification.py#L89",returnDescription:` <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label predicted.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> </ul> `,returnType:` <p>A list of <code>dict</code> with the following keys</p> `}}),Lr=new C({}),Or=new P({props:{name:"class transformers.AutomaticSpeechRecognitionPipeline",anchor:"transformers.AutomaticSpeechRecognitionPipeline",parameters:[{name:"feature_extractor",val:": typing.Union[ForwardRef('SequenceFeatureExtractor'), str]"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.AutomaticSpeechRecognitionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a>) &#x2014; The feature extractor that will be used by the pipeline to encode waveform for the model.`,name:"feature_extractor"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s",description:`<strong>chunk_length_s</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The input length for in each chunk. If <code>chunk_length_s = 0</code> then chunking is disabled (default). Only available for CTC models, e.g. <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For more information on how to effectively use <code>chunk_length_s</code>, please have a look at the <a href="https://huggingface.co/blog/asr-chunking" rel="nofollow">ASR chunking blog post</a>.</p> </div>`,name:"chunk_length_s"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.stride_length_s",description:`<strong>stride_length_s</strong> (<code>float</code>, <em>optional</em>, defaults to <code>chunk_length_s / 6</code>) &#x2014; The length of stride on the left and right of each chunk. Used only with <code>chunk_length_s &gt; 0</code>. This enables the model to <em>see</em> more context and infer letters better than without this context but the pipeline discards the stride bits at the end to make the final reconstitution as perfect as possible.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For more information on how to effectively use <code>stride_length_s</code>, please have a look at the <a href="https://huggingface.co/blog/asr-chunking" rel="nofollow">ASR chunking blog post</a>.</p> </div>`,name:"stride_length_s"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.decoder",description:`<strong>decoder</strong> (<code>pyctcdecode.BeamSearchDecoderCTC</code>, <em>optional</em>) &#x2014; <a href="https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180" rel="nofollow">PyCTCDecode&#x2019;s BeamSearchDecoderCTC</a> can be passed for language model boosted decoding. See <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM">Wav2Vec2ProcessorWithLM</a> for more information.`,name:"decoder"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/automatic_speech_recognition.py#L68"}}),Ur=new P({props:{name:"__call__",anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[numpy.ndarray, bytes, str]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code> or <code>dict</code>) &#x2014; The inputs is either :<ul> <li><code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system.</li> <li><code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.</li> <li>(<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) Raw audio at the correct sampling rate (no further check will be done)</li> <li><code>dict</code> form can be used to pass raw audio sampled at arbitrary <code>sampling_rate</code> and let this pipeline do the resampling. The dict must be in the format <code>{&quot;sampling_rate&quot;: int, &quot;raw&quot;: np.array}</code> with optionally a <code>&quot;stride&quot;: (left: int, right: int)</code> than can ask the pipeline to treat the first <code>left</code> samples and last <code>right</code> samples to be ignored in decoding (but used at inference to provide more context to the model). Only use <code>stride</code> with CTC models.</li> </ul>`,name:"inputs"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps",description:`<strong>return_timestamps</strong> (<em>optional</em>, <code>str</code>) &#x2014; Only available for pure CTC models. If set to <code>&quot;char&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every character in the text. For instance if you get <code>[{&quot;text&quot;: &quot;h&quot;, &quot;timestamps&quot;: (0.5,0.6), {&quot;text&quot;: &quot;i&quot;, &quot;timestamps&quot;: (0.7, .9)}]</code>, then it means the model predicts that the letter &#x201C;h&#x201D; was pronounced after <code>0.5</code> and before <code>0.6</code> seconds. 
If set to <code>&quot;word&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every word in the text. For instance if you get <code>[{&quot;text&quot;: &quot;hi &quot;, &quot;timestamps&quot;: (0.5,0.9), {&quot;text&quot;: &quot;there&quot;, &quot;timestamps&quot;: (1.0, .1.5)}]</code>, then it means the model predicts that the word &#x201C;hi&#x201D; was pronounced after <code>0.5</code> and before <code>0.9</code> seconds.`,name:"return_timestamps"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/automatic_speech_recognition.py#L142",returnDescription:` <p>A dictionary with the following keys:</p> <ul> <li><strong>text</strong> (<code>str</code> ) \u2014 The recognized text.</li> <li><strong>chunks</strong> (<em>optional(, <code>List[Dict]</code>) When using <code>return_timestamps</code>, the <code>chunks</code> will become a list containing all the various text chunks identified by the model, </em>e.g.* <code>[&#123;"text": "hi ", "timestamps": (0.5,0.9), &#123;"text": "there", "timestamps": (1.0, 1.5)&#125;]</code>. The original full text can roughly be recovered by doing <code>"".join(chunk["text"] for chunk in output["chunks"])</code>.</li> </ul> `,returnType:` <p><code>Dict</code></p> `}}),Gr=new C({}),Qr=new P({props:{name:"class transformers.Conversation",anchor:"transformers.Conversation",parameters:[{name:"text",val:": str = None"},{name:"conversation_id",val:": UUID = None"},{name:"past_user_inputs",val:" = None"},{name:"generated_responses",val:" = None"}],parametersDescription:[{anchor:"transformers.Conversation.text",description:`<strong>text</strong> (<code>str</code>, <em>optional</em>) &#x2014; The initial user input to start the conversation. If not provided, a user input needs to be provided manually using the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation.add_user_input">add_user_input()</a> method before the conversation can begin.`,name:"text"},{anchor:"transformers.Conversation.conversation_id",description:`<strong>conversation_id</strong> (<code>uuid.UUID</code>, <em>optional</em>) &#x2014; Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the conversation.`,name:"conversation_id"},{anchor:"transformers.Conversation.past_user_inputs",description:`<strong>past_user_inputs</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the user. You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings`,name:"past_user_inputs"},{anchor:"transformers.Conversation.generated_responses",description:`<strong>generated_responses</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the model. 
You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings`,name:"generated_responses"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L18"}}),Ln=new hn({props:{anchor:"transformers.Conversation.example",$$slots:{default:[KD]},$$scope:{ctx:z}}}),Rr=new P({props:{name:"add_user_input",anchor:"transformers.Conversation.add_user_input",parameters:[{name:"text",val:": str"},{name:"overwrite",val:": bool = False"}],parametersDescription:[{anchor:"transformers.Conversation.add_user_input.text",description:"<strong>text</strong> (<code>str</code>) &#x2014; The user input for the next conversation round.",name:"text"},{anchor:"transformers.Conversation.add_user_input.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not existing and unprocessed user input should be overwritten when this function is called.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L82"}}),Hr=new P({props:{name:"append_response",anchor:"transformers.Conversation.append_response",parameters:[{name:"response",val:": str"}],parametersDescription:[{anchor:"transformers.Conversation.append_response.response",description:"<strong>response</strong> (<code>str</code>) &#x2014; The model generated response.",name:"response"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L115"}}),Wr=new P({props:{name:"iter_texts",anchor:"transformers.Conversation.iter_texts",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L124"}}),Zr=new P({props:{name:"mark_processed",anchor:"transformers.Conversation.mark_processed",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L106"}}),Br=new P({props:{name:"class transformers.ConversationalPipeline",anchor:"transformers.ConversationalPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ConversationalPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ConversationalPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ConversationalPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ConversationalPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ConversationalPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ConversationalPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ConversationalPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ConversationalPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ConversationalPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ConversationalPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.ConversationalPipeline.min_length_for_response",description:`<strong>min_length_for_response</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The minimum length (in number of tokens) for a response.`,name:"min_length_for_response"},{anchor:"transformers.ConversationalPipeline.minimum_tokens",description:`<strong>minimum_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of tokens to leave for a response.`,name:"minimum_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L163"}}),Gn=new hn({props:{anchor:"transformers.ConversationalPipeline.example",$$slots:{default:[JD]},$$scope:{ctx:z}}}),Xr=new P({props:{name:"__call__",anchor:"transformers.ConversationalPipeline.__call__",parameters:[{name:"conversations",val:": typing.Union[transformers.pipelines.conversational.Conversation, typing.List[transformers.pipelines.conversational.Conversation]]"},{name:"num_workers",val:" = 0"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ConversationalPipeline.__call__.conversations",description:`<strong>conversations</strong> (a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation">Conversation</a> or a list of <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation">Conversation</a>) &#x2014; Conversations to generate responses for.`,name:"conversations"},{anchor:"transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"clean_up_tokenization_spaces"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L218",returnDescription:` <p>Conversation(s) with updated generated responses for those containing a new user input.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></p> `}}),Kr=new C({}),Jr=new P({props:{name:"class transformers.DocumentQuestionAnsweringPipeline",anchor:"transformers.DocumentQuestionAnsweringPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.DocumentQuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/document_question_answering.py#L102"}}),os=new P({props:{name:"__call__",anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__",parameters:[{name:"image",val:": typing.Union[ForwardRef('Image.Image'), str]"},{name:"question",val:": typing.Optional[str] = None"},{name:"word_boxes",val:": typing.Tuple[str, typing.List[float]] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.image",description:`<strong>image</strong> (<code>str</code> or <code>PIL.Image</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
If given a single image, it can be broadcasted to multiple questions.`,name:"image"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.question",description:`<strong>question</strong> (<code>str</code>) &#x2014; A question to ask of the document.`,name:"question"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.word_boxes",description:`<strong>word_boxes</strong> (<code>List[str, Tuple[float, float, float, float]]</code>, <em>optional</em>) &#x2014; A list of words and bounding boxes (normalized 0-&gt;1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR&#x2019;d results across many invocations of the pipeline without having to re-run it each time.`,name:"word_boxes"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context.`,name:"top_k"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.doc_stride",description:`<strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap.`,name:"doc_stride"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.max_answer_len",description:`<strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).`,name:"max_answer_len"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.max_seq_len",description:`<strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using <code>doc_stride</code> as overlap) if needed.`,name:"max_seq_len"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.max_question_len",description:`<strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. It will be truncated if needed.`,name:"max_question_len"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.handle_impossible_answer",description:`<strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.`,name:"handle_impossible_answer"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.lang",description:`<strong>lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; Language to use while running OCR. 
Defaults to english.`,name:"lang"},{anchor:"transformers.DocumentQuestionAnsweringPipeline.__call__.tesseract_config",description:`<strong>tesseract_config</strong> (<code>str</code>, <em>optional</em>) &#x2014; Additional flags to pass to tesseract while running OCR.`,name:"tesseract_config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/document_question_answering.py#L171",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>score</strong> (<code>float</code>) \u2014 The probability associated to the answer.</li> <li><strong>start</strong> (<code>int</code>) \u2014 The start word index of the answer (in the OCR\u2019d version of the input or provided <code>word_boxes</code>).</li> <li><strong>end</strong> (<code>int</code>) \u2014 The end word index of the answer (in the OCR\u2019d version of the input or provided <code>word_boxes</code>).</li> <li><strong>answer</strong> (<code>str</code>) \u2014 The answer to the question.</li> </ul> `,returnType:` <p>A <code>dict</code> or a list of <code>dict</code></p> `}}),ss=new C({}),as=new P({props:{name:"class transformers.FeatureExtractionPipeline",anchor:"transformers.FeatureExtractionPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": typing.Union[int, str, ForwardRef('torch.device')] = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FeatureExtractionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.FeatureExtractionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.FeatureExtractionPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.FeatureExtractionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.FeatureExtractionPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.FeatureExtractionPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.FeatureExtractionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/feature_extraction.py#L7"}}),ds=new P({props:{name:"__call__",anchor:"transformers.FeatureExtractionPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FeatureExtractionPipeline.__call__.args",description:"<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) to get the features of.",name:"args"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/feature_extraction.py#L69",returnDescription:` <p>The features computed by the model.</p> `,returnType:` <p>A nested list of <code>float</code></p> `}}),cs=new C({}),ps=new P({props:{name:"class transformers.FillMaskPipeline",anchor:"transformers.FillMaskPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": typing.Union[int, str, ForwardRef('torch.device')] = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FillMaskPipeline.model",description:`<strong>model</strong> (<a 
href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.FillMaskPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.FillMaskPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.FillMaskPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.FillMaskPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.FillMaskPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.FillMaskPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.FillMaskPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.FillMaskPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.FillMaskPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.FillMaskPipeline.top_k",description:`<strong>top_k</strong> (<code>int</code>, defaults to 5) &#x2014; The number of predictions to return.`,name:"top_k"},{anchor:"transformers.FillMaskPipeline.targets",description:`<strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).`,name:"targets"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/fill_mask.py#L34"}}),Zn=new RD({props:{$$slots:{default:[e7]},$$scope:{ctx:z}}}),hs=new P({props:{name:"__call__",anchor:"transformers.FillMaskPipeline.__call__",parameters:[{name:"inputs",val:""},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FillMaskPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of prompts) with masked tokens.`,name:"args"},{anchor:"transformers.FillMaskPipeline.__call__.targets",description:`<strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. 
If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).`,name:"targets"},{anchor:"transformers.FillMaskPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>) &#x2014; When passed, overrides the number of predictions to return.`,name:"top_k"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/fill_mask.py#L205",returnDescription:` <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) \u2014 The corresponding input with the mask token prediction.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> <li><strong>token</strong> (<code>int</code>) \u2014 The predicted token id (to replace the masked one).</li> <li><strong>token</strong> (<code>str</code>) \u2014 The predicted token (to replace the masked one).</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),us=new C({}),gs=new P({props:{name:"class transformers.ImageClassificationPipeline",anchor:"transformers.ImageClassificationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ImageClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ImageClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ImageClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ImageClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ImageClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ImageClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ImageClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ImageClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ImageClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_classification.py#L32"}}),ws=new P({props:{name:"__call__",anchor:"transformers.ImageClassificationPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image.Image'), typing.List[ForwardRef('Image.Image')]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageClassificationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images, which must then be passed as a string. 
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ImageClassificationPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.`,name:"top_k"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_classification.py#L59",returnDescription:` <p>A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images.</p> <p>The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label identified by the model.</li> <li><strong>score</strong> (<code>int</code>) \u2014 The score attributed by the model for that label.</li> </ul> `}}),Ts=new C({}),ks=new P({props:{name:"class transformers.ImageSegmentationPipeline",anchor:"transformers.ImageSegmentationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageSegmentationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ImageSegmentationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ImageSegmentationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ImageSegmentationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ImageSegmentationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ImageSegmentationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ImageSegmentationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ImageSegmentationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ImageSegmentationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ImageSegmentationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_segmentation.py#L30"}}),$s=new P({props:{name:"__call__",anchor:"transformers.ImageSegmentationPipeline.__call__",parameters:[{name:"images",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageSegmentationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ImageSegmentationPipeline.__call__.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>semantic</code>) &#x2014; Segmentation task to be performed, choose [<code>semantic</code>, <code>instance</code> and <code>panoptic</code>] depending on model capabilities.`,name:"task"},{anchor:"transformers.ImageSegmentationPipeline.__call__.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; Probability threshold to filter out predicted masks.`,name:"threshold"},{anchor:"transformers.ImageSegmentationPipeline.__call__.overlap_mask_area_threshold",description:`<strong>overlap_mask_area_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Mask overlap threshold to eliminate small, disconnected segments.`,name:"overlap_mask_area_threshold"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_segmentation.py#L67",returnDescription:` <p>A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image.</p> <p>The dictionaries contain the mask, label and score (where applicable) of each detected object and contains the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The class label identified by the model.</li> <li><strong>mask</strong> (<code>PIL.Image</code>) \u2014 A binary mask of the detected object as a Pil Image of shape (width, height) of the original image. Returns a mask filled with zeros if no object is found.</li> <li><strong>score</strong> (<em>optional</em> <code>float</code>) \u2014 Optionally, when the model is capable of estimating a confidence of the \u201Cobject\u201D described by the label and the mask.</li> </ul> `}}),Es=new C({}),qs=new P({props:{name:"class transformers.ImageToTextPipeline",anchor:"transformers.ImageToTextPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageToTextPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ImageToTextPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ImageToTextPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ImageToTextPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ImageToTextPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ImageToTextPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ImageToTextPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ImageToTextPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ImageToTextPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ImageToTextPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_to_text.py#L29"}}),zs=new P({props:{name:"__call__",anchor:"transformers.ImageToTextPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image.Image'), typing.List[ForwardRef('Image.Image')]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ImageToTextPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a HTTP(s) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images.`,name:"images"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_to_text.py#L50",returnDescription:` <p>Each result comes as a dictionary with the following key:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>) \u2014 The generated text.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Is=new C({}),js=new P({props:{name:"class transformers.TokenClassificationPipeline",anchor:"transformers.TokenClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f0401585430>"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TokenClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TokenClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TokenClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TokenClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TokenClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TokenClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TokenClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TokenClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TokenClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TokenClassificationPipeline.ignore_labels",description:`<strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.`,name:"ignore_labels"},{anchor:"transformers.TokenClassificationPipeline.grouped_entities",description:`<strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.`,name:"grouped_entities"},{anchor:"transformers.TokenClassificationPipeline.aggregation_strategy",description:`<strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. 
Word entity will simply be the token with the maximum score.</li> </ul>`,name:"aggregation_strategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L86"}}),Fs=new P({props:{name:"aggregate_words",anchor:"transformers.TokenClassificationPipeline.aggregate_words",parameters:[{name:"entities",val:": typing.List[dict]"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L368"}}),Ls=new P({props:{name:"gather_pre_entities",anchor:"transformers.TokenClassificationPipeline.gather_pre_entities",parameters:[{name:"sentence",val:": str"},{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"offset_mapping",val:": typing.Union[typing.List[typing.Tuple[int, int]], NoneType]"},{name:"special_tokens_mask",val:": ndarray"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L254"}}),Os=new P({props:{name:"group_entities",anchor:"transformers.TokenClassificationPipeline.group_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L430"}}),Us=new P({props:{name:"group_sub_entities",anchor:"transformers.TokenClassificationPipeline.group_sub_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_sub_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L395"}}),Ns=new C({}),Gs=new P({props:{name:"class transformers.ObjectDetectionPipeline",anchor:"transformers.ObjectDetectionPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ObjectDetectionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ObjectDetectionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ObjectDetectionPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ObjectDetectionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ObjectDetectionPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ObjectDetectionPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ObjectDetectionPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ObjectDetectionPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ObjectDetectionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ObjectDetectionPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/object_detection.py#L24"}}),Hs=new P({props:{name:"__call__",anchor:"transformers.ObjectDetectionPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ObjectDetectionPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ObjectDetectionPipeline.__call__.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.`,name:"threshold"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/object_detection.py#L50",returnDescription:` <p>A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image.</p> <p>The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The class label identified by the model.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The score attributed by the model for that label.</li> <li><strong>box</strong> (<code>List[Dict[str, int]]</code>) \u2014 The bounding box of detected object in image\u2019s original size.</li> </ul> `}}),Ws=new C({}),Zs=new P({props:{name:"class transformers.QuestionAnsweringPipeline",anchor:"transformers.QuestionAnsweringPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": PreTrainedTokenizer"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"device",val:": int = -1"},{name:"task",val:": str = ''"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.QuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.QuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.QuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.QuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.QuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.QuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.QuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.QuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.QuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L224"}}),Xs=new P({props:{name:"__call__",anchor:"transformers.QuestionAnsweringPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.__call__.args",description:`<strong>args</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>) &#x2014; One or several <code>SquadExample</code> containing the question and context.`,name:"args"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.X",description:`<strong>X</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>, <em>optional</em>) &#x2014; One or several <code>SquadExample</code> containing the question and context (will be treated the same way as if passed as the first positional argument).`,name:"X"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.data",description:`<strong>data</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>, <em>optional</em>) &#x2014; One or several <code>SquadExample</code> containing the question and context (will be treated the same way as if passed as the first positional argument).`,name:"data"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.question",description:`<strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several question(s) (must be used in conjunction with the <code>context</code> argument).`,name:"question"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.context",description:`<strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several context(s) associated with the question(s) (must be used in conjunction with the <code>question</code> argument).`,name:"context"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.topk",description:`<strong>topk</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context.`,name:"topk"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.doc_stride",description:`<strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. 
This argument controls the size of that overlap.`,name:"doc_stride"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_answer_len",description:`<strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).`,name:"max_answer_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_seq_len",description:`<strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using <code>doc_stride</code> as overlap) if needed.`,name:"max_seq_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_question_len",description:`<strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. It will be truncated if needed.`,name:"max_question_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer",description:`<strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.`,name:"handle_impossible_answer"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.align_to_words",description:`<strong>align_to_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Attempts to align the answer to real words. Improves quality on space separated langages. Might hurt on non-space-separated languages (like Japanese or Chinese)`,name:"align_to_words"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L330",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>score</strong> (<code>float</code>) \u2014 The probability associated to the answer.</li> <li><strong>start</strong> (<code>int</code>) \u2014 The character start index of the answer (in the tokenized version of the input).</li> <li><strong>end</strong> (<code>int</code>) \u2014 The character end index of the answer (in the tokenized version of the input).</li> <li><strong>answer</strong> (<code>str</code>) \u2014 The answer to the question.</li> </ul> `,returnType:` <p>A <code>dict</code> or a list of <code>dict</code></p> `}}),Ks=new P({props:{name:"create_sample",anchor:"transformers.QuestionAnsweringPipeline.create_sample",parameters:[{name:"question",val:": typing.Union[str, typing.List[str]]"},{name:"context",val:": typing.Union[str, typing.List[str]]"}],parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.create_sample.question",description:"<strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The question(s) asked.",name:"question"},{anchor:"transformers.QuestionAnsweringPipeline.create_sample.context",description:"<strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The context(s) in which we will look for the answer.",name:"context"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L265",returnDescription:` <p>The corresponding <code>SquadExample</code> grouping question and context.</p> `,returnType:` <p>One or a list of <code>SquadExample</code></p> `}}),Js=new 
P({props:{name:"span_to_answer",anchor:"transformers.QuestionAnsweringPipeline.span_to_answer",parameters:[{name:"text",val:": str"},{name:"start",val:": int"},{name:"end",val:": int"}],parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.text",description:"<strong>text</strong> (<code>str</code>) &#x2014; The actual context to extract the answer from.",name:"text"},{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; The answer starting token index.",name:"start"},{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; The answer end token index.",name:"end"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L606",returnDescription:` <p>str, \u2018start\u2019: int, \u2018end\u2019: int}\`</p> `,returnType:` <p>Dictionary like \`{\u2018answer\u2019</p> `}}),ea=new C({}),ta=new P({props:{name:"class transformers.SummarizationPipeline",anchor:"transformers.SummarizationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.SummarizationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.SummarizationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.SummarizationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.SummarizationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.SummarizationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.SummarizationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.SummarizationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.SummarizationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.SummarizationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.SummarizationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L196"}}),ho=new hn({props:{anchor:"transformers.SummarizationPipeline.example",$$slots:{default:[t7]},$$scope:{ctx:z}}}),oa=new P({props:{name:"__call__",anchor:"transformers.SummarizationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.SummarizationPipeline.__call__.documents",description:`<strong>documents</strong> (<em>str</em> or <code>List[str]</code>) &#x2014; One or several articles (or one list of articles) to summarize.`,name:"documents"},{anchor:"transformers.SummarizationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs`,name:"return_text"},{anchor:"transformers.SummarizationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"clean_up_tokenization_spaces"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L222",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>summary_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The summary of the corresponding input.</li> <li><strong>summary_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the summary.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),ra=new C({}),sa=new P({props:{name:"class transformers.TableQuestionAnsweringPipeline",anchor:"transformers.TableQuestionAnsweringPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.table_question_answering.TableQuestionAnsweringArgumentHandler object at 0x7f040156eb20>"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TableQuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TableQuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TableQuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TableQuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TableQuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TableQuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TableQuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TableQuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TableQuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TableQuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/table_question_answering.py#L89"}}),da=new P({props:{name:"__call__",anchor:"transformers.TableQuestionAnsweringPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.table",description:`<strong>table</strong> (<code>pd.DataFrame</code> or <code>Dict</code>) &#x2014; Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. See above for an example of dictionary.`,name:"table"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.query",description:`<strong>query</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Query or list of queries that will be sent to the model alongside the table.`,name:"query"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.sequential",description:`<strong>sequential</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature.`,name:"sequential"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <code>TapasTruncationStrategy</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate row by row, removing rows from the table.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/table_question_answering.py#L256",returnDescription:` <p>Each result is a dictionary with the following keys:</p> <ul> <li><strong>answer</strong> (<code>str</code>) \u2014 The answer of the query given the table. If there is an aggregator, the answer will be preceded by <code>AGGREGATOR &gt;</code>.</li> <li><strong>coordinates</strong> (<code>List[Tuple[int, int]]</code>) \u2014 Coordinates of the cells of the answers.</li> <li><strong>cells</strong> (<code>List[str]</code>) \u2014 List of strings made up of the answer cell values.</li> <li><strong>aggregator</strong> (<code>str</code>) \u2014 If the model has an aggregator, this returns the aggregator.</li> </ul> `,returnType:` <p>A dictionary or a list of dictionaries containing results</p> `}}),_o=new hn({props:{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.example",$$slots:{default:[n7]},$$scope:{ctx:z}}}),bo=new hn({props:{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.example-2",$$slots:{default:[o7]},$$scope:{ctx:z}}}),pa=new C({}),ma=new P({props:{name:"class transformers.TextClassificationPipeline",anchor:"transformers.TextClassificationPipeline",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TextClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TextClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TextClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TextClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TextClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TextClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TextClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TextClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TextClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TextClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TextClassificationPipeline.return_all_scores",description:`<strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return all prediction scores or just the one of the predicted class.`,name:"return_all_scores"},{anchor:"transformers.TextClassificationPipeline.function_to_apply",description:`<strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <ul> <li><code>&quot;default&quot;</code>: if the model has a single label, will apply the sigmoid function on the output. 
If the model has several labels, will apply the softmax function on the output.</li> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul>`,name:"function_to_apply"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_classification.py#L48"}}),ga=new P({props:{name:"__call__",anchor:"transformers.TextClassificationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TextClassificationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code> or <code>Dict[str]</code>, or <code>List[Dict[str]]</code>) &#x2014; One or several texts to classify. In order to use text pairs for your classification, you can send a dictionary containing <code>{&quot;text&quot;, &quot;text_pair&quot;}</code> keys, or a list of those.`,name:"args"},{anchor:"transformers.TextClassificationPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to <code>1</code>) &#x2014; How many results to return.`,name:"top_k"},{anchor:"transformers.TextClassificationPipeline.__call__.function_to_apply",description:`<strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <p>If this argument is not specified, then it will apply the following functions according to the number of labels:</p> <ul> <li>If the model has a single label, will apply the sigmoid function on the output.</li> <li>If the model has several labels, will apply the softmax function on the output.</li> </ul> <p>Possible values are:</p> <ul> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul>`,name:"function_to_apply"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_classification.py#L106",returnDescription:` <p>Each result comes as a list of dictionaries with the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label predicted.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> </ul> <p>If <code>top_k</code> is used, one such dictionary is returned per label.</p> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),_a=new C({}),ba=new P({props:{name:"class transformers.TextGenerationPipeline",anchor:"transformers.TextGenerationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TextGenerationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TextGenerationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TextGenerationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TextGenerationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TextGenerationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TextGenerationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TextGenerationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TextGenerationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TextGenerationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TextGenerationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_generation.py#L21"}}),ka=new P({props:{name:"__call__",anchor:"transformers.TextGenerationPipeline.__call__",parameters:[{name:"text_inputs",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TextGenerationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several prompts (or one list of prompts) to complete.`,name:"args"},{anchor:"transformers.TextGenerationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.TextGenerationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.`,name:"return_text"},{anchor:"transformers.TextGenerationPipeline.__call__.return_full_text",description:`<strong>return_full_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>False</code> only added text is returned, otherwise the full text is returned. Only meaningful if <em>return_text</em> is set to True.`,name:"return_full_text"},{anchor:"transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.TextGenerationPipeline.__call__.prefix",description:`<strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; Prefix added to prompt.`,name:"prefix"},{anchor:"transformers.TextGenerationPipeline.__call__.handle_long_generation",description:`<strong>handle_long_generation</strong> (<code>str</code>, <em>optional</em>) &#x2014; By default, this pipeline does not handle long generation (ones that exceed, in one form or another, the model maximum length). There is no perfect way to address this (more info: <a href="https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227" rel="nofollow">https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227</a>). 
This provides common strategies to work around that problem depending on your use case.</p> <ul> <li><code>None</code>: default strategy where nothing in particular happens</li> <li><code>&quot;hole&quot;</code>: Truncates left of input, and leaves a gap wide enough to let generation happen (might truncate a lot of the prompt and is not suitable when generation exceeds the model capacity)</li> </ul> <p>generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"handle_long_generation"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_generation.py#L148",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the generated text.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Pa=new C({}),ya=new P({props:{name:"class transformers.Text2TextGenerationPipeline",anchor:"transformers.Text2TextGenerationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Text2TextGenerationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.Text2TextGenerationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.Text2TextGenerationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.Text2TextGenerationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.Text2TextGenerationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.Text2TextGenerationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.Text2TextGenerationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.Text2TextGenerationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.Text2TextGenerationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.Text2TextGenerationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L26"}}),yo=new hn({props:{anchor:"transformers.Text2TextGenerationPipeline.example",$$slots:{default:[r7]},$$scope:{ctx:z}}}),Ea=new P({props:{name:"__call__",anchor:"transformers.Text2TextGenerationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Text2TextGenerationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Input text for the encoder.`,name:"args"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.`,name:"return_text"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.truncation",description:`<strong>truncation</strong> (<code>TruncationStrategy</code>, <em>optional</em>, defaults to <code>TruncationStrategy.DO_NOT_TRUNCATE</code>) &#x2014; The truncation strategy for the tokenization within the pipeline. <code>TruncationStrategy.DO_NOT_TRUNCATE</code> (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model&#x2019;s max_length instead of throwing an error down the line. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"truncation"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L119",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the generated text.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),qa=new P({props:{name:"check_inputs",anchor:"transformers.Text2TextGenerationPipeline.check_inputs",parameters:[{name:"input_length",val:": int"},{name:"min_length",val:": int"},{name:"max_length",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L92"}}),Aa=new C({}),Ca=new P({props:{name:"class transformers.TokenClassificationPipeline",anchor:"transformers.TokenClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f0401585430>"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TokenClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TokenClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TokenClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TokenClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TokenClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TokenClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TokenClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TokenClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TokenClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TokenClassificationPipeline.ignore_labels",description:`<strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.`,name:"ignore_labels"},{anchor:"transformers.TokenClassificationPipeline.grouped_entities",description:`<strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.`,name:"grouped_entities"},{anchor:"transformers.TokenClassificationPipeline.aggregation_strategy",description:`<strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. 
(A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.</li> </ul>`,name:"aggregation_strategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L86"}}),Ia=new P({props:{name:"__call__",anchor:"transformers.TokenClassificationPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[str, typing.List[str]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) for token classification.`,name:"inputs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L162",returnDescription:` <p>Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys:</p> <ul> <li><strong>word</strong> (<code>str</code>) \u2014 The token/word classified. This is obtained by decoding the selected tokens. If you want to have the exact string in the original sentence, use <code>start</code> and <code>stop</code>.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability for <code>entity</code>.</li> <li><strong>entity</strong> (<code>str</code>) \u2014 The entity predicted for that token/word (it is named <em>entity_group</em> when <em>aggregation_strategy</em> is not <code>"none"</code>.</li> <li><strong>index</strong> (<code>int</code>, only present when <code>aggregation_strategy="none"</code>) \u2014 The index of the corresponding token in the sentence.</li> <li><strong>start</strong> (<code>int</code>, <em>optional</em>) \u2014 The index of the start of the corresponding entity in the sentence. 
Only exists if the offsets are available within the tokenizer</li> <li><strong>end</strong> (<code>int</code>, <em>optional</em>) \u2014 The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),ja=new P({props:{name:"aggregate_words",anchor:"transformers.TokenClassificationPipeline.aggregate_words",parameters:[{name:"entities",val:": typing.List[dict]"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L368"}}),Sa=new P({props:{name:"gather_pre_entities",anchor:"transformers.TokenClassificationPipeline.gather_pre_entities",parameters:[{name:"sentence",val:": str"},{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"offset_mapping",val:": typing.Union[typing.List[typing.Tuple[int, int]], NoneType]"},{name:"special_tokens_mask",val:": ndarray"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L254"}}),Ma=new P({props:{name:"group_entities",anchor:"transformers.TokenClassificationPipeline.group_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L430"}}),Fa=new P({props:{name:"group_sub_entities",anchor:"transformers.TokenClassificationPipeline.group_sub_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_sub_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L395"}}),La=new C({}),Oa=new P({props:{name:"class transformers.TranslationPipeline",anchor:"transformers.TranslationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TranslationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TranslationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TranslationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TranslationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TranslationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TranslationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TranslationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TranslationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TranslationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.TranslationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L263"}}),Io=new hn({props:{anchor:"transformers.TranslationPipeline.example",$$slots:{default:[s7]},$$scope:{ctx:z}}}),Ga=new P({props:{name:"__call__",anchor:"transformers.TranslationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TranslationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Texts to be translated.`,name:"args"},{anchor:"transformers.TranslationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.TranslationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.`,name:"return_text"},{anchor:"transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.TranslationPipeline.__call__.src_lang",description:`<strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models`,name:"src_lang"},{anchor:"transformers.TranslationPipeline.__call__.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the desired output. Might be required for multilingual models. 
Will not have any effect for single pair translation models generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"tgt_lang"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L315",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>translation_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The translation.</li> <li><strong>translation_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the translation.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Qa=new C({}),Ra=new P({props:{name:"class transformers.VisualQuestionAnsweringPipeline",anchor:"transformers.VisualQuestionAnsweringPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.VisualQuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.VisualQuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.VisualQuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.VisualQuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.VisualQuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.VisualQuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.VisualQuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.VisualQuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.VisualQuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.VisualQuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/visual_question_answering.py#L19"}}),Za=new P({props:{name:"__call__",anchor:"transformers.VisualQuestionAnsweringPipeline.__call__",parameters:[{name:"image",val:": typing.Union[ForwardRef('Image.Image'), str]"},{name:"question",val:": str = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.VisualQuestionAnsweringPipeline.__call__.image",description:`<strong>image</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions.`,name:"image"},{anchor:"transformers.VisualQuestionAnsweringPipeline.__call__.question",description:`<strong>question</strong> (<code>str</code>, <code>List[str]</code>) &#x2014; The question(s) asked. 
If given a single question, it can be broadcasted to multiple images.`,name:"question"},{anchor:"transformers.VisualQuestionAnsweringPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.`,name:"top_k"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/visual_question_answering.py#L46",returnDescription:` <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label identified by the model.</li> <li><strong>score</strong> (<code>int</code>) \u2014 The score attributed by the model for that label.</li> </ul> `,returnType:` <p>A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys</p> `}}),Ba=new C({}),Ya=new P({props:{name:"class transformers.ZeroShotClassificationPipeline",anchor:"transformers.ZeroShotClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.zero_shot_classification.ZeroShotClassificationArgumentHandler object at 0x7f040158f040>"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ZeroShotClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ZeroShotClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ZeroShotClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ZeroShotClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ZeroShotClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ZeroShotClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ZeroShotClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ZeroShotClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ZeroShotClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L46"}}),ei=new P({props:{name:"__call__",anchor:"transformers.ZeroShotClassificationPipeline.__call__",parameters:[{name:"sequences",val:": typing.Union[str, typing.List[str]]"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotClassificationPipeline.__call__.sequences",description:`<strong>sequences</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The sequence(s) to classify, will be truncated if the model input is too large.`,name:"sequences"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.candidate_labels",description:`<strong>candidate_labels</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The set of possible class labels to classify each sequence into. 
Can be a single label, a string of comma-separated labels, or a list of labels.`,name:"candidate_labels"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template",description:`<strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This example is {}.&quot;</code>) &#x2014; The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, the default template is <code>&quot;This example is {}.&quot;</code> With the candidate label <code>&quot;sports&quot;</code>, this would be fed into the model like <code>&quot;&lt;cls&gt; sequence to classify &lt;sep&gt; This example is sports . &lt;sep&gt;&quot;</code>. The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting.`,name:"hypothesis_template"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.multi_label",description:`<strong>multi_label</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not multiple candidate labels can be true. If <code>False</code>, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If <code>True</code>, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score.`,name:"multi_label"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L139",returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) \u2014 The sequence for which this is the output.</li> <li><strong>labels</strong> (<code>List[str]</code>) \u2014 The labels sorted by order of likelihood.</li> <li><strong>scores</strong> (<code>List[float]</code>) \u2014 The probabilities for each of the labels.</li> </ul> `,returnType:` <p>A <code>dict</code> or a list of <code>dict</code></p> `}}),ni=new C({}),oi=new P({props:{name:"class transformers.ZeroShotImageClassificationPipeline",anchor:"transformers.ZeroShotImageClassificationPipeline",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotImageClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ZeroShotImageClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ZeroShotImageClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ZeroShotImageClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ZeroShotImageClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ZeroShotImageClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ZeroShotImageClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ZeroShotImageClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ZeroShotImageClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ZeroShotImageClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_image_classification.py#L31"}}),ai=new P({props:{name:"__call__",anchor:"transformers.ZeroShotImageClassificationPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image'), typing.List[ForwardRef('Image')]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul>`,name:"images"},{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels",description:`<strong>candidate_labels</strong> (<code>List[str]</code>) &#x2014; The candidate labels for this image`,name:"candidate_labels"},{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template",description:`<strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This is a photo of {}&quot;</code>) &#x2014; The sentence used in cunjunction with <em>candidate_labels</em> to attempt the image classification by replacing the placeholder with the candidate_labels. Then likelihood is estimated by using logits_per_image`,name:"hypothesis_template"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_image_classification.py#L50",returnDescription:` <p>A list of dictionaries containing result, one dictionnary per proposed label. The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label identified by the model. It is one of the suggested <code>candidate_label</code>.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The score attributed by the model for that label (between 0 and 1).</li> </ul> `}}),ii=new C({}),li=new P({props:{name:"class transformers.ZeroShotObjectDetectionPipeline",anchor:"transformers.ZeroShotObjectDetectionPipeline",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotObjectDetectionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. 
You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_object_detection.py#L31"}}),pi=new P({props:{name:"__call__",anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image.Image'), typing.List[ForwardRef('Image.Image')]]"},{name:"text_queries",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an http url pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul>`,name:"images"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.text_queries",description:"<strong>text_queries</strong> (<code>str</code> or <code>List[str]</code> or <code>List[List[str]]</code>) &#x2014; Text queries to query the target image with.",name:"text_queries"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.If",description:"<strong>If</strong> given multiple images, <code>text_queries</code> should be provided as a list of lists, where each nested list &#x2014;",name:"If"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.contains",description:"<strong>contains</strong> the text queries for the corresponding image. &#x2014;",name:"contains"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The probability necessary to make a prediction.`,name:"threshold"},{anchor:"transformers.ZeroShotObjectDetectionPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top predictions that will be returned by the pipeline. If the provided number is <code>None</code> or higher than the number of predictions available, it will default to the number of predictions.`,name:"top_k"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_object_detection.py#L52",returnDescription:` <p>A list of lists containing prediction results, one list per input image. Each list contains dictionaries with the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 Text query corresponding to the found object.</li> <li><strong>score</strong> (<code>float</code>) \u2014 Score corresponding to the object (between 0 and 1).</li> <li><strong>box</strong> (<code>Dict[str,int]</code>) \u2014 Bounding box of the detected object in image\u2019s original size. 
It is a dictionary with <code>x_min</code>, <code>x_max</code>, <code>y_min</code>, <code>y_max</code> keys.</li> </ul> `}}),mi=new C({}),fi=new P({props:{name:"class transformers.Pipeline",anchor:"transformers.Pipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": typing.Union[int, str, ForwardRef('torch.device')] = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.Pipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.Pipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.Pipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.Pipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.Pipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.Pipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.Pipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.Pipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.Pipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.`,name:"device"},{anchor:"transformers.Pipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L722"}}),hi=new P({props:{name:"check_model_type",anchor:"transformers.Pipeline.check_model_type",parameters:[{name:"supported_models",val:": typing.Union[typing.List[str], dict]"}],parametersDescription:[{anchor:"transformers.Pipeline.check_model_type.supported_models",description:`<strong>supported_models</strong> (<code>List[str]</code> or <code>dict</code>) &#x2014; The list of models supported by the pipeline, or a dictionary with model class values.`,name:"supported_models"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L907"}}),ui=new P({props:{name:"device_placement",anchor:"transformers.Pipeline.device_placement",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L847",returnDescription:` <p>Context manager</p> `}}),Ro=new hn({props:{anchor:"transformers.Pipeline.device_placement.example",$$slots:{default:[a7]},$$scope:{ctx:z}}}),gi=new P({props:{name:"ensure_tensor_on_device",anchor:"transformers.Pipeline.ensure_tensor_on_device",parameters:[{name:"**inputs",val:""}],parametersDescription:[{anchor:"transformers.Pipeline.ensure_tensor_on_device.inputs",description:`<strong>inputs</strong> (keyword arguments that should be 
<code>torch.Tensor</code>, the rest is ignored) &#x2014; The tensors to place on <code>self.device</code>.`,name:"inputs"},{anchor:"transformers.Pipeline.ensure_tensor_on_device.Recursive",description:"<strong>Recursive</strong> on lists <strong>only</strong>. &#x2014;",name:"Recursive"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L873",returnDescription:` <p>The same as <code>inputs</code> but on the proper device.</p> `,returnType:` <p><code>Dict[str, torch.Tensor]</code></p> `}}),_i=new P({props:{name:"postprocess",anchor:"transformers.Pipeline.postprocess",parameters:[{name:"model_outputs",val:": ModelOutput"},{name:"**postprocess_parameters",val:": typing.Dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L964"}}),vi=new P({props:{name:"predict",anchor:"transformers.Pipeline.predict",parameters:[{name:"X",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L841"}}),Ti=new P({props:{name:"preprocess",anchor:"transformers.Pipeline.preprocess",parameters:[{name:"input_",val:": typing.Any"},{name:"**preprocess_parameters",val:": typing.Dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L943"}}),ki=new P({props:{name:"save_pretrained",anchor:"transformers.Pipeline.save_pretrained",parameters:[{name:"save_directory",val:": str"}],parametersDescription:[{anchor:"transformers.Pipeline.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; A path to the directory where to saved. It will be created if it doesn&#x2019;t exist.`,name:"save_directory"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L790"}}),Pi=new P({props:{name:"transform",anchor:"transformers.Pipeline.transform",parameters:[{name:"X",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L835"}}),{c(){T=o("meta"),$=l(),y=o("h1"),w=o("a"),x=o("span"),h(f.$$.fragment),k=l(),Ee=o("span"),Vb=a("Pipelines"),Qh=l(),gn=o("p"),Hb=a(`The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. 
See the `),Ci=o("a"),Wb=a("task summary"),Zb=a(" for examples of use."),Rh=l(),Di=o("p"),Bb=a("There are two categories of pipeline abstractions to be aware about:"),Vh=l(),_n=o("ul"),Fd=o("li"),rr=o("p"),Yb=a("The "),zi=o("a"),Xb=a("pipeline()"),Kb=a(" which is the most powerful object encapsulating all other pipelines."),Jb=l(),sr=o("li"),Ld=o("p"),ev=a("The other task-specific pipelines:"),tv=l(),E=o("ul"),Od=o("li"),Ii=o("a"),nv=a("AudioClassificationPipeline"),ov=l(),Ud=o("li"),ji=o("a"),rv=a("AutomaticSpeechRecognitionPipeline"),sv=l(),Nd=o("li"),Si=o("a"),av=a("ConversationalPipeline"),iv=l(),Gd=o("li"),Mi=o("a"),lv=a("DocumentQuestionAnsweringPipeline"),dv=l(),Qd=o("li"),Fi=o("a"),cv=a("FeatureExtractionPipeline"),pv=l(),Rd=o("li"),Li=o("a"),mv=a("FillMaskPipeline"),fv=l(),Vd=o("li"),Oi=o("a"),hv=a("ImageClassificationPipeline"),uv=l(),Hd=o("li"),Ui=o("a"),gv=a("ImageSegmentationPipeline"),_v=l(),Wd=o("li"),Ni=o("a"),bv=a("ImageToTextPipeline"),vv=l(),Zd=o("li"),Gi=o("a"),wv=a("ObjectDetectionPipeline"),Tv=l(),Bd=o("li"),Qi=o("a"),kv=a("QuestionAnsweringPipeline"),Pv=l(),Yd=o("li"),Ri=o("a"),yv=a("SummarizationPipeline"),xv=l(),Xd=o("li"),Vi=o("a"),$v=a("TableQuestionAnsweringPipeline"),Ev=l(),Kd=o("li"),Hi=o("a"),qv=a("TextClassificationPipeline"),Av=l(),Jd=o("li"),Wi=o("a"),Cv=a("TextGenerationPipeline"),Dv=l(),ec=o("li"),Zi=o("a"),zv=a("Text2TextGenerationPipeline"),Iv=l(),tc=o("li"),Bi=o("a"),jv=a("TokenClassificationPipeline"),Sv=l(),nc=o("li"),Yi=o("a"),Mv=a("TranslationPipeline"),Fv=l(),oc=o("li"),Xi=o("a"),Lv=a("VisualQuestionAnsweringPipeline"),Ov=l(),rc=o("li"),Ki=o("a"),Uv=a("ZeroShotClassificationPipeline"),Nv=l(),sc=o("li"),Ji=o("a"),Gv=a("ZeroShotImageClassificationPipeline"),Qv=l(),ac=o("li"),el=o("a"),Rv=a("ZeroShotObjectDetectionPipeline"),Hh=l(),st=o("h2"),bn=o("a"),ic=o("span"),h(ar.$$.fragment),Vv=l(),lc=o("span"),Hv=a("The pipeline abstraction"),Wh=l(),vn=o("p"),Wv=a("The "),dc=o("em"),Zv=a("pipeline"),Bv=a(` abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life.`),Zh=l(),tl=o("p"),Yv=a("Simple call on one item:"),Bh=l(),h(ir.$$.fragment),Yh=l(),wn=o("p"),Xv=a("If you want to use a specific model from the "),lr=o("a"),Kv=a("hub"),Jv=a(` you can ignore the task if the model on the hub already defines it:`),Xh=l(),h(dr.$$.fragment),Kh=l(),Tn=o("p"),ew=a("To call a pipeline on many items, you can either call with a "),cc=o("em"),tw=a("list"),nw=a("."),Jh=l(),h(cr.$$.fragment),eu=l(),kn=o("p"),ow=a("To iterate of full datasets it is recommended to use a "),pc=o("code"),rw=a("dataset"),sw=a(` directly. This means you don\u2019t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. 
If it doesn\u2019t don\u2019t hesitate to create an issue.`),tu=l(),h(pr.$$.fragment),nu=l(),nl=o("p"),aw=a("For ease of use, a generator is also possible:"),ou=l(),h(mr.$$.fragment),ru=l(),X=o("div"),h(fr.$$.fragment),iw=l(),hr=o("p"),lw=a("Utility factory method to build a "),ol=o("a"),dw=a("Pipeline"),cw=a("."),pw=l(),mc=o("p"),mw=a("Pipelines are made of:"),fw=l(),at=o("ul"),ur=o("li"),hw=a("A "),rl=o("a"),uw=a("tokenizer"),gw=a(" in charge of mapping raw textual input to token."),_w=l(),gr=o("li"),bw=a("A "),sl=o("a"),vw=a("model"),ww=a(" to make predictions from the inputs."),Tw=l(),fc=o("li"),kw=a("Some (optional) post processing for enhancing model\u2019s output."),Pw=l(),h(Pn.$$.fragment),su=l(),it=o("h2"),yn=o("a"),hc=o("span"),h(_r.$$.fragment),yw=l(),uc=o("span"),xw=a("Pipeline batching"),au=l(),je=o("p"),$w=a(`All pipelines can use batching. This will work whenever the pipeline uses its streaming ability (so when passing lists or `),gc=o("code"),Ew=a("Dataset"),qw=a(" or "),_c=o("code"),Aw=a("generator"),Cw=a(")."),iu=l(),h(br.$$.fragment),lu=l(),h(xn.$$.fragment),du=l(),h(vr.$$.fragment),cu=l(),h(wr.$$.fragment),pu=l(),al=o("p"),Dw=a("Example where it\u2019s most a slowdown:"),mu=l(),h(Tr.$$.fragment),fu=l(),$n=o("p"),zw=a("This is a occasional very long sentence compared to the other. In that case, the "),bc=o("strong"),Iw=a("whole"),jw=a(` batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes.`),hu=l(),h(kr.$$.fragment),uu=l(),il=o("p"),Sw=a(`There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:`),gu=l(),ll=o("p"),Mw=a("For users, a rule of thumb is:"),_u=l(),ce=o("ul"),vc=o("li"),wc=o("p"),Tc=o("strong"),Fw=a(`Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the only way to go.`),Lw=l(),kc=o("li"),Pc=o("p"),Ow=a("If you are latency constrained (live product doing inference), don\u2019t batch"),Uw=l(),yc=o("li"),xc=o("p"),Nw=a("If you are using CPU, don\u2019t batch."),Gw=l(),Pr=o("li"),$c=o("p"),Qw=a("If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:"),Rw=l(),lt=o("ul"),Ec=o("li"),Vw=a(`If you have no clue about the size of the sequence_length (\u201Cnatural\u201D data), by default don\u2019t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don\u2019t control the sequence_length.)`),Hw=l(),qc=o("li"),Ww=a(`If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.`),Zw=l(),Ac=o("li"),Bw=a("The larger the GPU the more likely batching is going to be more interesting"),Yw=l(),Cc=o("li"),Dc=o("p"),Xw=a("As soon as you enable batching, make sure you can handle OOMs nicely."),bu=l(),dt=o("h2"),En=o("a"),zc=o("span"),h(yr.$$.fragment),Kw=l(),Ic=o("span"),Jw=a("Pipeline chunk batching"),vu=l(),qe=o("p"),jc=o("code"),e1=a("zero-shot-classification"),t1=a(" and "),Sc=o("code"),n1=a("question-answering"),o1=a(` are slightly specific in the sense, that a single input might yield multiple forward pass of a model. 
Under normal circumstances, this would yield issues with `),Mc=o("code"),r1=a("batch_size"),s1=a(" argument."),wu=l(),Se=o("p"),a1=a("In order to circumvent this issue, both of these pipelines are a bit specific, they are "),Fc=o("code"),i1=a("ChunkPipeline"),l1=a(` instead of regular `),Lc=o("code"),d1=a("Pipeline"),c1=a(". In short:"),Tu=l(),h(xr.$$.fragment),ku=l(),dl=o("p"),p1=a("Now becomes:"),Pu=l(),h($r.$$.fragment),yu=l(),cl=o("p"),m1=a(`This should be very transparent to your code because the pipelines are used in the same way.`),xu=l(),qn=o("p"),f1=a(`This is a simplified view, since the pipeline can handle automatically the batch to ! Meaning you don\u2019t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the `),Oc=o("code"),h1=a("batch_size"),u1=a(` independently of the inputs. The caveats from the previous section still apply.`),$u=l(),ct=o("h2"),An=o("a"),Uc=o("span"),h(Er.$$.fragment),g1=l(),Nc=o("span"),_1=a("Pipeline custom code"),Eu=l(),pl=o("p"),b1=a("If you want to override a specific pipeline."),qu=l(),Cn=o("p"),v1=a(`Don\u2019t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so `),Gc=o("code"),w1=a("transformers"),T1=a(" could maybe support your use case."),Au=l(),ml=o("p"),k1=a("If you want to try simply you can:"),Cu=l(),fl=o("ul"),Qc=o("li"),P1=a("Subclass your pipeline of choice"),Du=l(),h(qr.$$.fragment),zu=l(),hl=o("p"),y1=a("That should enable you to do all the custom code you want."),Iu=l(),pt=o("h2"),Dn=o("a"),Rc=o("span"),h(Ar.$$.fragment),x1=l(),Vc=o("span"),$1=a("Implementing a pipeline"),ju=l(),ul=o("p"),gl=o("a"),E1=a("Implementing a new pipeline"),Su=l(),mt=o("h2"),zn=o("a"),Hc=o("span"),h(Cr.$$.fragment),q1=l(),Wc=o("span"),A1=a("The task specific pipelines"),Mu=l(),ft=o("h3"),In=o("a"),Zc=o("span"),h(Dr.$$.fragment),C1=l(),Bc=o("span"),D1=a("AudioClassificationPipeline"),Fu=l(),K=o("div"),h(zr.$$.fragment),z1=l(),Ir=o("p"),I1=a("Audio classification pipeline using any "),Yc=o("code"),j1=a("AutoModelForAudioClassification"),S1=a(`. This pipeline predicts the class of a raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio formats.`),M1=l(),ht=o("p"),F1=a("This pipeline can currently be loaded from "),_l=o("a"),L1=a("pipeline()"),O1=a(` using the following task identifier: `),Xc=o("code"),U1=a('"audio-classification"'),N1=a("."),G1=l(),jr=o("p"),Q1=a(`See the list of available models on `),Sr=o("a"),R1=a("huggingface.co/models"),V1=a("."),H1=l(),jn=o("div"),h(Mr.$$.fragment),W1=l(),Fr=o("p"),Z1=a("Classify the sequence(s) given as inputs. See the "),bl=o("a"),B1=a("AutomaticSpeechRecognitionPipeline"),Y1=a(` documentation for more information.`),Lu=l(),ut=o("h3"),Sn=o("a"),Kc=o("span"),h(Lr.$$.fragment),X1=l(),Jc=o("span"),K1=a("AutomaticSpeechRecognitionPipeline"),Ou=l(),ge=o("div"),h(Or.$$.fragment),J1=l(),ep=o("p"),e2=a("Pipeline that aims at extracting spoken text contained within some audio."),t2=l(),tp=o("p"),n2=a(`The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for to support multiple audio formats`),o2=l(),Mn=o("div"),h(Ur.$$.fragment),r2=l(),Nr=o("p"),s2=a("Classify the sequence(s) given as inputs. 
See the "),vl=o("a"),a2=a("AutomaticSpeechRecognitionPipeline"),i2=a(` documentation for more information.`),Uu=l(),gt=o("h3"),Fn=o("a"),np=o("span"),h(Gr.$$.fragment),l2=l(),op=o("span"),d2=a("ConversationalPipeline"),Nu=l(),M=o("div"),h(Qr.$$.fragment),c2=l(),Ae=o("p"),p2=a(`Utility class containing a conversation and its history. This class is meant to be used as an input to the `),wl=o("a"),m2=a("ConversationalPipeline"),f2=a(`. The conversation contains a number of utility function to manage the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input before being passed to the `),Tl=o("a"),h2=a("ConversationalPipeline"),u2=a(`. This user input is either created when the class is instantiated, or by calling `),rp=o("code"),g2=a('conversational_pipeline.append_response("input")'),_2=a(" after a conversation turn."),b2=l(),h(Ln.$$.fragment),v2=l(),On=o("div"),h(Rr.$$.fragment),w2=l(),Vr=o("p"),T2=a("Add a user input to the conversation for the next round. This populates the internal "),sp=o("code"),k2=a("new_user_input"),P2=a(" field."),y2=l(),Un=o("div"),h(Hr.$$.fragment),x2=l(),ap=o("p"),$2=a("Append a response to the list of generated responses."),E2=l(),Me=o("div"),h(Wr.$$.fragment),q2=l(),ip=o("p"),A2=a("Iterates over all blobs of the conversation."),C2=l(),_e=o("p"),D2=a("Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. "),lp=o("code"),z2=a("is_user"),I2=a(" is a "),dp=o("code"),j2=a("bool"),S2=a(`, `),cp=o("code"),M2=a("text_chunks"),F2=a(" is a "),pp=o("code"),L2=a("str"),O2=a("."),U2=l(),Nn=o("div"),h(Zr.$$.fragment),N2=l(),Ce=o("p"),G2=a("Mark the conversation as processed (moves the content of "),mp=o("code"),Q2=a("new_user_input"),R2=a(" to "),fp=o("code"),V2=a("past_user_inputs"),H2=a(`) and empties the `),hp=o("code"),W2=a("new_user_input"),Z2=a(" field."),Gu=l(),G=o("div"),h(Br.$$.fragment),B2=l(),up=o("p"),Y2=a("Multi-turn conversational pipeline."),X2=l(),_t=o("p"),K2=a("This conversational pipeline can currently be loaded from "),kl=o("a"),J2=a("pipeline()"),eT=a(` using the following task identifier: `),gp=o("code"),tT=a('"conversational"'),nT=a("."),oT=l(),be=o("p"),rT=a(`The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: `),_p=o("em"),sT=a("\u2018microsoft/DialoGPT-small\u2019"),aT=a(", "),bp=o("em"),iT=a("\u2018microsoft/DialoGPT-medium\u2019"),lT=a(", "),vp=o("em"),dT=a("\u2018microsoft/DialoGPT-large\u2019"),cT=a(`. See the up-to-date list of available models on `),Yr=o("a"),pT=a("huggingface.co/models"),mT=a("."),fT=l(),h(Gn.$$.fragment),hT=l(),Qn=o("div"),h(Xr.$$.fragment),uT=l(),wp=o("p"),gT=a("Generate responses for the conversation(s) given as inputs."),Qu=l(),bt=o("h3"),Rn=o("a"),Tp=o("span"),h(Kr.$$.fragment),_T=l(),kp=o("span"),bT=a("DocumentQuestionAnsweringPipeline"),Ru=l(),J=o("div"),h(Jr.$$.fragment),vT=l(),es=o("p"),wT=a("Document Question Answering pipeline using any "),Pp=o("code"),TT=a("AutoModelForDocumentQuestionAnswering"),kT=a(`. 
The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR\u2019d words/boxes) as input instead of text context.`),PT=l(),vt=o("p"),yT=a("This document question answering pipeline can currently be loaded from "),Pl=o("a"),xT=a("pipeline()"),$T=a(` using the following task identifier: `),yp=o("code"),ET=a('"document-question-answering"'),qT=a("."),AT=l(),ts=o("p"),CT=a(`The models that this pipeline can use are models that have been fine-tuned on a document question answering task. See the up-to-date list of available models on `),ns=o("a"),DT=a("huggingface.co/models"),zT=a("."),IT=l(),we=o("div"),h(os.$$.fragment),jT=l(),rs=o("p"),ST=a(`Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `),xp=o("code"),MT=a("word_boxes"),FT=a(` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run.`),LT=l(),$p=o("p"),OT=a("You can invoke the pipeline several ways:"),UT=l(),De=o("ul"),Ep=o("li"),qp=o("code"),NT=a("pipeline(image=image, question=question)"),GT=l(),Ap=o("li"),Cp=o("code"),QT=a("pipeline(image=image, question=question, word_boxes=word_boxes)"),RT=l(),Dp=o("li"),zp=o("code"),VT=a('pipeline([{"image": image, "question": question}])'),HT=l(),Ip=o("li"),jp=o("code"),WT=a('pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])'),Vu=l(),wt=o("h3"),Vn=o("a"),Sp=o("span"),h(ss.$$.fragment),ZT=l(),Mp=o("span"),BT=a("FeatureExtractionPipeline"),Hu=l(),ee=o("div"),h(as.$$.fragment),YT=l(),Fp=o("p"),XT=a(`Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.`),KT=l(),Tt=o("p"),JT=a("This feature extraction pipeline can currently be loaded from "),yl=o("a"),ek=a("pipeline()"),tk=a(` using the task identifier: `),Lp=o("code"),nk=a('"feature-extraction"'),ok=a("."),rk=l(),is=o("p"),sk=a(`All models may be used for this pipeline. See a list of all models, including community-contributed models on `),ls=o("a"),ak=a("huggingface.co/models"),ik=a("."),lk=l(),Hn=o("div"),h(ds.$$.fragment),dk=l(),Op=o("p"),ck=a("Extract the features of the input(s)."),Wu=l(),kt=o("h3"),Wn=o("a"),Up=o("span"),h(cs.$$.fragment),pk=l(),Np=o("span"),mk=a("FillMaskPipeline"),Zu=l(),Q=o("div"),h(ps.$$.fragment),fk=l(),Pt=o("p"),hk=a("Masked language modeling prediction pipeline using any "),Gp=o("code"),uk=a("ModelWithLMHead"),gk=a(". See the "),xl=o("a"),_k=a(`masked language modeling examples`),bk=a(" for more information."),vk=l(),yt=o("p"),wk=a("This mask filling pipeline can currently be loaded from "),$l=o("a"),Tk=a("pipeline()"),kk=a(` using the following task identifier: `),Qp=o("code"),Pk=a('"fill-mask"'),yk=a("."),xk=l(),ms=o("p"),$k=a(`The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. 
See the up-to-date list of available models on `),fs=o("a"),Ek=a("huggingface.co/models"),qk=a("."),Ak=l(),h(Zn.$$.fragment),Ck=l(),Bn=o("div"),h(hs.$$.fragment),Dk=l(),Rp=o("p"),zk=a("Fill the masked token in the text(s) given as inputs."),Bu=l(),xt=o("h3"),Yn=o("a"),Vp=o("span"),h(us.$$.fragment),Ik=l(),Hp=o("span"),jk=a("ImageClassificationPipeline"),Yu=l(),te=o("div"),h(gs.$$.fragment),Sk=l(),_s=o("p"),Mk=a("Image classification pipeline using any "),Wp=o("code"),Fk=a("AutoModelForImageClassification"),Lk=a(`. This pipeline predicts the class of an image.`),Ok=l(),$t=o("p"),Uk=a("This image classification pipeline can currently be loaded from "),El=o("a"),Nk=a("pipeline()"),Gk=a(` using the following task identifier: `),Zp=o("code"),Qk=a('"image-classification"'),Rk=a("."),Vk=l(),bs=o("p"),Hk=a(`See the list of available models on `),vs=o("a"),Wk=a("huggingface.co/models"),Zk=a("."),Bk=l(),Xn=o("div"),h(ws.$$.fragment),Yk=l(),Bp=o("p"),Xk=a("Assign labels to the image(s) passed as inputs."),Xu=l(),Et=o("h3"),Kn=o("a"),Yp=o("span"),h(Ts.$$.fragment),Kk=l(),Xp=o("span"),Jk=a("ImageSegmentationPipeline"),Ku=l(),ne=o("div"),h(ks.$$.fragment),eP=l(),Ps=o("p"),tP=a("Image segmentation pipeline using any "),Kp=o("code"),nP=a("AutoModelForXXXSegmentation"),oP=a(`. This pipeline predicts masks of objects and their classes.`),rP=l(),qt=o("p"),sP=a("This image segmentation pipeline can currently be loaded from "),ql=o("a"),aP=a("pipeline()"),iP=a(` using the following task identifier: `),Jp=o("code"),lP=a('"image-segmentation"'),dP=a("."),cP=l(),ys=o("p"),pP=a(`See the list of available models on `),xs=o("a"),mP=a("huggingface.co/models"),fP=a("."),hP=l(),Jn=o("div"),h($s.$$.fragment),uP=l(),em=o("p"),gP=a("Perform segmentation (detect masks & classes) in the image(s) passed as inputs."),Ju=l(),At=o("h3"),eo=o("a"),tm=o("span"),h(Es.$$.fragment),_P=l(),nm=o("span"),bP=a("ImageToTextPipeline"),eg=l(),oe=o("div"),h(qs.$$.fragment),vP=l(),As=o("p"),wP=a("Image To Text pipeline using a "),om=o("code"),TP=a("AutoModelForVision2Seq"),kP=a(". This pipeline predicts a caption for a given image."),PP=l(),rm=o("p"),yP=a(`This image to text pipeline can currently be loaded from pipeline() using the following task identifier: \u201Cimage-to-text\u201D.`),xP=l(),Cs=o("p"),$P=a(`See the list of available models on `),Ds=o("a"),EP=a("huggingface.co/models"),qP=a("."),AP=l(),to=o("div"),h(zs.$$.fragment),CP=l(),sm=o("p"),DP=a("Assign labels to the image(s) passed as inputs."),tg=l(),Ct=o("h3"),no=o("a"),am=o("span"),h(Is.$$.fragment),zP=l(),im=o("span"),IP=a("NerPipeline"),ng=l(),S=o("div"),h(js.$$.fragment),jP=l(),Dt=o("p"),SP=a("Named Entity Recognition pipeline using any "),lm=o("code"),MP=a("ModelForTokenClassification"),FP=a(". See the "),Al=o("a"),LP=a(`named entity recognition examples`),OP=a(" for more information."),UP=l(),zt=o("p"),NP=a("This token recognition pipeline can currently be loaded from "),Cl=o("a"),GP=a("pipeline()"),QP=a(` using the following task identifier: `),dm=o("code"),RP=a('"ner"'),VP=a(" (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),HP=l(),Ss=o("p"),WP=a(`The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
See the up-to-date list of available models on `),Ms=o("a"),ZP=a("huggingface.co/models"),BP=a("."),YP=l(),Fe=o("div"),h(Fs.$$.fragment),XP=l(),cm=o("p"),KP=a("Override tokens from a given word that disagree to force agreement on word boundaries."),JP=l(),pm=o("p"),ey=a(`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),ty=l(),oo=o("div"),h(Ls.$$.fragment),ny=l(),mm=o("p"),oy=a("Fuse various numpy arrays into dicts with all the information needed for aggregation"),ry=l(),ro=o("div"),h(Os.$$.fragment),sy=l(),fm=o("p"),ay=a("Find and group together the adjacent tokens with the same entity predicted."),iy=l(),so=o("div"),h(Us.$$.fragment),ly=l(),hm=o("p"),dy=a("Group together the adjacent tokens with the same entity predicted."),og=l(),ao=o("p"),cy=a("See "),Dl=o("a"),py=a("TokenClassificationPipeline"),my=a(" for all details."),rg=l(),It=o("h3"),io=o("a"),um=o("span"),h(Ns.$$.fragment),fy=l(),gm=o("span"),hy=a("ObjectDetectionPipeline"),sg=l(),re=o("div"),h(Gs.$$.fragment),uy=l(),Qs=o("p"),gy=a("Object detection pipeline using any "),_m=o("code"),_y=a("AutoModelForObjectDetection"),by=a(`. This pipeline predicts bounding boxes of objects and their classes.`),vy=l(),jt=o("p"),wy=a("This object detection pipeline can currently be loaded from "),zl=o("a"),Ty=a("pipeline()"),ky=a(` using the following task identifier: `),bm=o("code"),Py=a('"object-detection"'),yy=a("."),xy=l(),Rs=o("p"),$y=a("See the list of available models on "),Vs=o("a"),Ey=a("huggingface.co/models"),qy=a("."),Ay=l(),lo=o("div"),h(Hs.$$.fragment),Cy=l(),vm=o("p"),Dy=a("Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),ag=l(),St=o("h3"),co=o("a"),wm=o("span"),h(Ws.$$.fragment),zy=l(),Tm=o("span"),Iy=a("QuestionAnsweringPipeline"),ig=l(),F=o("div"),h(Zs.$$.fragment),jy=l(),Mt=o("p"),Sy=a("Question Answering pipeline using any "),km=o("code"),My=a("ModelForQuestionAnswering"),Fy=a(". See the "),Il=o("a"),Ly=a(`question answering examples`),Oy=a(" for more information."),Uy=l(),Ft=o("p"),Ny=a("This question answering pipeline can currently be loaded from "),jl=o("a"),Gy=a("pipeline()"),Qy=a(` using the following task identifier: `),Pm=o("code"),Ry=a('"question-answering"'),Vy=a("."),Hy=l(),Bs=o("p"),Wy=a(`The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on `),Ys=o("a"),Zy=a("huggingface.co/models"),By=a("."),Yy=l(),po=o("div"),h(Xs.$$.fragment),Xy=l(),ym=o("p"),Ky=a("Answer the question(s) given as inputs by using the context(s)."),Jy=l(),Le=o("div"),h(Ks.$$.fragment),e0=l(),Lt=o("p"),t0=a("QuestionAnsweringPipeline leverages the "),xm=o("code"),n0=a("SquadExample"),o0=a(` internally. 
This helper method encapsulate all the logic for converting question(s) and context(s) to `),$m=o("code"),r0=a("SquadExample"),s0=a("."),a0=l(),Em=o("p"),i0=a("We currently support extractive question answering."),l0=l(),mo=o("div"),h(Js.$$.fragment),d0=l(),qm=o("p"),c0=a("When decoding from token probabilities, this method maps token indexes to actual word in the initial context."),lg=l(),Ot=o("h3"),fo=o("a"),Am=o("span"),h(ea.$$.fragment),p0=l(),Cm=o("span"),m0=a("SummarizationPipeline"),dg=l(),R=o("div"),h(ta.$$.fragment),f0=l(),Dm=o("p"),h0=a("Summarize news articles and other documents."),u0=l(),Ut=o("p"),g0=a("This summarizing pipeline can currently be loaded from "),Sl=o("a"),_0=a("pipeline()"),b0=a(` using the following task identifier: `),zm=o("code"),v0=a('"summarization"'),w0=a("."),T0=l(),L=o("p"),k0=a(`The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, \u2019`),Im=o("em"),P0=a("bart-large-cnn"),y0=a("\u2019, \u2019"),jm=o("em"),x0=a("t5-small"),$0=a("\u2019, \u2019"),Sm=o("em"),E0=a("t5-base"),q0=a("\u2019, \u2019"),Mm=o("em"),A0=a("t5-large"),C0=a("\u2019, \u2019"),Fm=o("em"),D0=a("t5-3b"),z0=a("\u2019, \u2019"),Lm=o("em"),I0=a("t5-11b"),j0=a(`\u2019. See the up-to-date list of available models on `),na=o("a"),S0=a("huggingface.co/models"),M0=a("."),F0=l(),h(ho.$$.fragment),L0=l(),uo=o("div"),h(oa.$$.fragment),O0=l(),Om=o("p"),U0=a("Summarize the text(s) given as inputs."),cg=l(),Nt=o("h3"),go=o("a"),Um=o("span"),h(ra.$$.fragment),N0=l(),Nm=o("span"),G0=a("TableQuestionAnsweringPipeline"),pg=l(),se=o("div"),h(sa.$$.fragment),Q0=l(),aa=o("p"),R0=a("Table Question Answering pipeline using a "),Gm=o("code"),V0=a("ModelForTableQuestionAnswering"),H0=a(`. This pipeline is only available in PyTorch.`),W0=l(),Gt=o("p"),Z0=a("This tabular question answering pipeline can currently be loaded from "),Ml=o("a"),B0=a("pipeline()"),Y0=a(` using the following task identifier: `),Qm=o("code"),X0=a('"table-question-answering"'),K0=a("."),J0=l(),ia=o("p"),e4=a(`The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on `),la=o("a"),t4=a("huggingface.co/models"),n4=a("."),o4=l(),U=o("div"),h(da.$$.fragment),r4=l(),Rm=o("p"),s4=a("Answers queries according to a table. 
The pipeline accepts several types of inputs which are detailed below:"),a4=l(),V=o("ul"),Vm=o("li"),Hm=o("code"),i4=a("pipeline(table, query)"),l4=l(),Wm=o("li"),Zm=o("code"),d4=a("pipeline(table, [query])"),c4=l(),Bm=o("li"),Ym=o("code"),p4=a("pipeline(table=table, query=query)"),m4=l(),Xm=o("li"),Km=o("code"),f4=a("pipeline(table=table, query=[query])"),h4=l(),Jm=o("li"),ef=o("code"),u4=a('pipeline({"table": table, "query": query})'),g4=l(),tf=o("li"),nf=o("code"),_4=a('pipeline({"table": table, "query": [query]})'),b4=l(),of=o("li"),rf=o("code"),v4=a('pipeline([{"table": table, "query": query}, {"table": table, "query": query}])'),w4=l(),ca=o("p"),T4=a("The "),sf=o("code"),k4=a("table"),P4=a(" argument should be a dict or a DataFrame built from that dict, containing the whole table:"),y4=l(),h(_o.$$.fragment),x4=l(),af=o("p"),$4=a("This dictionary can be passed in as such, or can be converted to a pandas DataFrame:"),E4=l(),h(bo.$$.fragment),mg=l(),Qt=o("h3"),vo=o("a"),lf=o("span"),h(pa.$$.fragment),q4=l(),df=o("span"),A4=a("TextClassificationPipeline"),fg=l(),H=o("div"),h(ma.$$.fragment),C4=l(),Rt=o("p"),D4=a("Text classification pipeline using any "),cf=o("code"),z4=a("ModelForSequenceClassification"),I4=a(". See the "),Fl=o("a"),j4=a(`sequence classification examples`),S4=a(" for more information."),M4=l(),Vt=o("p"),F4=a("This text classification pipeline can currently be loaded from "),Ll=o("a"),L4=a("pipeline()"),O4=a(` using the following task identifier: `),pf=o("code"),U4=a('"sentiment-analysis"'),N4=a(" (for classifying sequences according to positive or negative sentiments)."),G4=l(),fa=o("p"),Q4=a("If multiple classification labels are available ("),mf=o("code"),R4=a("model.config.num_labels >= 2"),V4=a(`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.`),H4=l(),ha=o("p"),W4=a(`The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on `),ua=o("a"),Z4=a("huggingface.co/models"),B4=a("."),Y4=l(),wo=o("div"),h(ga.$$.fragment),X4=l(),ff=o("p"),K4=a("Classify the text(s) given as inputs."),hg=l(),Ht=o("h3"),To=o("a"),hf=o("span"),h(_a.$$.fragment),J4=l(),uf=o("span"),ex=a("TextGenerationPipeline"),ug=l(),ae=o("div"),h(ba.$$.fragment),tx=l(),va=o("p"),nx=a("Language generation pipeline using any "),gf=o("code"),ox=a("ModelWithLMHead"),rx=a(`. This pipeline predicts the words that will follow a specified text prompt.`),sx=l(),Wt=o("p"),ax=a("This language generation pipeline can currently be loaded from "),Ol=o("a"),ix=a("pipeline()"),lx=a(` using the following task identifier: `),_f=o("code"),dx=a('"text-generation"'),cx=a("."),px=l(),wa=o("p"),mx=a(`The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). 
See the list of available models on `),Ta=o("a"),fx=a("huggingface.co/models"),hx=a("."),ux=l(),ko=o("div"),h(ka.$$.fragment),gx=l(),bf=o("p"),_x=a("Complete the prompt(s) given as inputs."),gg=l(),Zt=o("h3"),Po=o("a"),vf=o("span"),h(Pa.$$.fragment),bx=l(),wf=o("span"),vx=a("Text2TextGenerationPipeline"),_g=l(),O=o("div"),h(ya.$$.fragment),wx=l(),Tf=o("p"),Tx=a("Pipeline for text to text generation using seq2seq models."),kx=l(),Bt=o("p"),Px=a("This Text2TextGenerationPipeline pipeline can currently be loaded from "),Ul=o("a"),yx=a("pipeline()"),xx=a(` using the following task identifier: `),kf=o("code"),$x=a('"text2text-generation"'),Ex=a("."),qx=l(),xa=o("p"),Ax=a(`The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on `),$a=o("a"),Cx=a("huggingface.co/models"),Dx=a("."),zx=l(),h(yo.$$.fragment),Ix=l(),xo=o("div"),h(Ea.$$.fragment),jx=l(),Pf=o("p"),Sx=a("Generate the output text(s) using text(s) given as inputs."),Mx=l(),$o=o("div"),h(qa.$$.fragment),Fx=l(),yf=o("p"),Lx=a("Checks whether there might be something wrong with given input with regard to the model."),bg=l(),Yt=o("h3"),Eo=o("a"),xf=o("span"),h(Aa.$$.fragment),Ox=l(),$f=o("span"),Ux=a("TokenClassificationPipeline"),vg=l(),I=o("div"),h(Ca.$$.fragment),Nx=l(),Xt=o("p"),Gx=a("Named Entity Recognition pipeline using any "),Ef=o("code"),Qx=a("ModelForTokenClassification"),Rx=a(". See the "),Nl=o("a"),Vx=a(`named entity recognition examples`),Hx=a(" for more information."),Wx=l(),Kt=o("p"),Zx=a("This token recognition pipeline can currently be loaded from "),Gl=o("a"),Bx=a("pipeline()"),Yx=a(` using the following task identifier: `),qf=o("code"),Xx=a('"ner"'),Kx=a(" (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),Jx=l(),Da=o("p"),e$=a(`The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on `),za=o("a"),t$=a("huggingface.co/models"),n$=a("."),o$=l(),qo=o("div"),h(Ia.$$.fragment),r$=l(),Af=o("p"),s$=a("Classify each token of the text(s) given as inputs."),a$=l(),Oe=o("div"),h(ja.$$.fragment),i$=l(),Cf=o("p"),l$=a("Override tokens from a given word that disagree to force agreement on word boundaries."),d$=l(),Df=o("p"),c$=a(`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),p$=l(),Ao=o("div"),h(Sa.$$.fragment),m$=l(),zf=o("p"),f$=a("Fuse various numpy arrays into dicts with all the information needed for aggregation"),h$=l(),Co=o("div"),h(Ma.$$.fragment),u$=l(),If=o("p"),g$=a("Find and group together the adjacent tokens with the same entity predicted."),_$=l(),Do=o("div"),h(Fa.$$.fragment),b$=l(),jf=o("p"),v$=a("Group together the adjacent tokens with the same entity predicted."),wg=l(),Jt=o("h3"),zo=o("a"),Sf=o("span"),h(La.$$.fragment),w$=l(),Mf=o("span"),T$=a("TranslationPipeline"),Tg=l(),W=o("div"),h(Oa.$$.fragment),k$=l(),Ff=o("p"),P$=a("Translates from one language to another."),y$=l(),en=o("p"),x$=a("This translation pipeline can currently be loaded from "),Ql=o("a"),$$=a("pipeline()"),E$=a(` using the following task identifier: `),Lf=o("code"),q$=a('"translation_xx_to_yy"'),A$=a("."),C$=l(),Ua=o("p"),D$=a(`The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on `),Na=o("a"),z$=a("huggingface.co/models"),I$=a("."),j$=l(),h(Io.$$.fragment),S$=l(),jo=o("div"),h(Ga.$$.fragment),M$=l(),Of=o("p"),F$=a("Translate the text(s) given as inputs."),kg=l(),tn=o("h3"),So=o("a"),Uf=o("span"),h(Qa.$$.fragment),L$=l(),Nf=o("span"),O$=a("VisualQuestionAnsweringPipeline"),Pg=l(),ie=o("div"),h(Ra.$$.fragment),U$=l(),Va=o("p"),N$=a("Visual Question Answering pipeline using a "),Gf=o("code"),G$=a("AutoModelForVisualQuestionAnswering"),Q$=a(`. This pipeline is currently only available in PyTorch.`),R$=l(),nn=o("p"),V$=a("This visual question answering pipeline can currently be loaded from "),Rl=o("a"),H$=a("pipeline()"),W$=a(` using the following task identifiers: `),Qf=o("code"),Z$=a('"visual-question-answering", "vqa"'),B$=a("."),Y$=l(),Ha=o("p"),X$=a(`The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See the up-to-date list of available models on `),Wa=o("a"),K$=a("huggingface.co/models"),J$=a("."),e9=l(),Ue=o("div"),h(Za.$$.fragment),t9=l(),Rf=o("p"),n9=a(`Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below:`),o9=l(),ze=o("ul"),Vf=o("li"),Hf=o("code"),r9=a("pipeline(image=image, question=question)"),s9=l(),Wf=o("li"),Zf=o("code"),a9=a('pipeline({"image": image, "question": question})'),i9=l(),Bf=o("li"),Yf=o("code"),l9=a('pipeline([{"image": image, "question": question}])'),d9=l(),Xf=o("li"),Kf=o("code"),c9=a('pipeline([{"image": image, "question": question}, {"image": image, "question": question}])'),yg=l(),on=o("h3"),Mo=o("a"),Jf=o("span"),h(Ba.$$.fragment),p9=l(),eh=o("span"),m9=a("ZeroShotClassificationPipeline"),xg=l(),Z=o("div"),h(Ya.$$.fragment),f9=l(),Xa=o("p"),h9=a("NLI-based zero-shot classification pipeline using a "),th=o("code"),u9=a("ModelForSequenceClassification"),g9=a(` trained on NLI (natural language inference) tasks.`),_9=l(),Ie=o("p"),b9=a(`Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for `),nh=o("em"),v9=a("entailment"),w9=a(` is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the `),oh=o("em"),T9=a("entailment"),k9=a(` label must be included in the model config\u2019s :attr:`),rh=o("em"),P9=a("~transformers.PretrainedConfig.label2id"),y9=a("."),x9=l(),rn=o("p"),$9=a("This NLI pipeline can currently be loaded from "),Vl=o("a"),E9=a("pipeline()"),q9=a(` using the following task identifier: `),sh=o("code"),A9=a('"zero-shot-classification"'),C9=a("."),D9=l(),Ka=o("p"),z9=a(`The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on `),Ja=o("a"),I9=a("huggingface.co/models"),j9=a("."),S9=l(),Fo=o("div"),h(ei.$$.fragment),M9=l(),ti=o("p"),F9=a("Classify the sequence(s) given as inputs. See the "),Hl=o("a"),L9=a("ZeroShotClassificationPipeline"),O9=a(` documentation for more information.`),$g=l(),sn=o("h3"),Lo=o("a"),ah=o("span"),h(ni.$$.fragment),U9=l(),ih=o("span"),N9=a("ZeroShotImageClassificationPipeline"),Eg=l(),le=o("div"),h(oi.$$.fragment),G9=l(),an=o("p"),Q9=a("Zero shot image classification pipeline using "),lh=o("code"),R9=a("CLIPModel"),V9=a(`. 
This pipeline predicts the class of an image when you provide an image and a set of `),dh=o("code"),H9=a("candidate_labels"),W9=a("."),Z9=l(),ln=o("p"),B9=a("This image classification pipeline can currently be loaded from "),Wl=o("a"),Y9=a("pipeline()"),X9=a(` using the following task identifier: `),ch=o("code"),K9=a('"zero-shot-image-classification"'),J9=a("."),eE=l(),ri=o("p"),tE=a(`See the list of available models on `),si=o("a"),nE=a("huggingface.co/models"),oE=a("."),rE=l(),Oo=o("div"),h(ai.$$.fragment),sE=l(),ph=o("p"),aE=a("Assign labels to the image(s) passed as inputs."),qg=l(),dn=o("h3"),Uo=o("a"),mh=o("span"),h(ii.$$.fragment),iE=l(),fh=o("span"),lE=a("ZeroShotObjectDetectionPipeline"),Ag=l(),de=o("div"),h(li.$$.fragment),dE=l(),cn=o("p"),cE=a("Zero shot object detection pipeline using "),hh=o("code"),pE=a("OwlViTForObjectDetection"),mE=a(`. This pipeline predicts bounding boxes of objects when you provide an image and a set of `),uh=o("code"),fE=a("candidate_labels"),hE=a("."),uE=l(),pn=o("p"),gE=a("This object detection pipeline can currently be loaded from "),Zl=o("a"),_E=a("pipeline()"),bE=a(` using the following task identifier: `),gh=o("code"),vE=a('"zero-shot-object-detection"'),wE=a("."),TE=l(),di=o("p"),kE=a(`See the list of available models on `),ci=o("a"),PE=a("huggingface.co/models"),yE=a("."),xE=l(),No=o("div"),h(pi.$$.fragment),$E=l(),_h=o("p"),EE=a("Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),Cg=l(),mn=o("h2"),Go=o("a"),bh=o("span"),h(mi.$$.fragment),qE=l(),Bl=o("span"),AE=a("Parent class: "),vh=o("code"),CE=a("Pipeline"),Dg=l(),A=o("div"),h(fi.$$.fragment),DE=l(),wh=o("p"),zE=a(`The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.`),IE=l(),Th=o("p"),jE=a(`Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations:`),SE=l(),kh=o("p"),ME=a("Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output"),FE=l(),Ph=o("p"),LE=a("Pipeline supports running on CPU or GPU through the device argument (see below)."),OE=l(),ve=o("p"),UE=a("Some pipeline, like for instance "),Yl=o("a"),NE=a("FeatureExtractionPipeline"),GE=a(" ("),yh=o("code"),QE=a("'feature-extraction'"),RE=a(`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `),xh=o("code"),VE=a("binary_output"),HE=a(` constructor argument. If set to `),$h=o("code"),WE=a("True"),ZE=a(", the output will be stored in the pickle format."),BE=l(),Qo=o("div"),h(hi.$$.fragment),YE=l(),Eh=o("p"),XE=a("Check if the model class is in supported by the pipeline."),KE=l(),Ne=o("div"),h(ui.$$.fragment),JE=l(),qh=o("p"),e5=a("Context Manager allowing tensor allocation on the user-specified device in framework agnostic way."),t5=l(),h(Ro.$$.fragment),n5=l(),Vo=o("div"),h(gi.$$.fragment),o5=l(),Ah=o("p"),r5=a("Ensure PyTorch tensors are on the specified device."),s5=l(),Ho=o("div"),h(_i.$$.fragment),a5=l(),bi=o("p"),i5=a("Postprocess will receive the raw outputs of the "),Ch=o("code"),l5=a("_forward"),d5=a(` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers).`),c5=l(),Wo=o("div"),h(vi.$$.fragment),p5=l(),wi=o("p"),m5=a("Scikit / Keras interface to transformers\u2019 pipelines. 
This method will forward to "),Dh=o("strong"),f5=a("call"),h5=a("()."),u5=l(),Zo=o("div"),h(Ti.$$.fragment),g5=l(),fn=o("p"),_5=a("Preprocess will take the "),zh=o("code"),b5=a("input_"),v5=a(` of a specific pipeline and return a dictionnary of everything necessary for `),Ih=o("code"),w5=a("_forward"),T5=a(" to run properly. It should contain at least one tensor, but might have arbitrary other items."),k5=l(),Bo=o("div"),h(ki.$$.fragment),P5=l(),jh=o("p"),y5=a("Save the pipeline\u2019s model and tokenizer."),x5=l(),Yo=o("div"),h(Pi.$$.fragment),$5=l(),yi=o("p"),E5=a("Scikit / Keras interface to transformers\u2019 pipelines. This method will forward to "),Sh=o("strong"),q5=a("call"),A5=a("()."),this.h()},l(t){const p=ZD('[data-svelte="svelte-1phssyn"]',document.head);T=r(p,"META",{name:!0,content:!0}),p.forEach(n),$=d(t),y=r(t,"H1",{class:!0});var xi=s(y);w=r(xi,"A",{id:!0,class:!0,href:!0});var Mh=s(w);x=r(Mh,"SPAN",{});var Fh=s(x);u(f.$$.fragment,Fh),Fh.forEach(n),Mh.forEach(n),k=d(xi),Ee=r(xi,"SPAN",{});var Lh=s(Ee);Vb=i(Lh,"Pipelines"),Lh.forEach(n),xi.forEach(n),Qh=d(t),gn=r(t,"P",{});var $i=s(gn);Hb=i($i,`The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. See the `),Ci=r($i,"A",{href:!0});var Oh=s(Ci);Wb=i(Oh,"task summary"),Oh.forEach(n),Zb=i($i," for examples of use."),$i.forEach(n),Rh=d(t),Di=r(t,"P",{});var Uh=s(Di);Bb=i(Uh,"There are two categories of pipeline abstractions to be aware about:"),Uh.forEach(n),Vh=d(t),_n=r(t,"UL",{});var Ei=s(_n);Fd=r(Ei,"LI",{});var Nh=s(Fd);rr=r(Nh,"P",{});var qi=s(rr);Yb=i(qi,"The "),zi=r(qi,"A",{href:!0});var Gh=s(zi);Xb=i(Gh,"pipeline()"),Gh.forEach(n),Kb=i(qi," which is the most powerful object encapsulating all other pipelines."),qi.forEach(n),Nh.forEach(n),Jb=d(Ei),sr=r(Ei,"LI",{});var Ig=s(sr);Ld=r(Ig,"P",{});var D5=s(Ld);ev=i(D5,"The other task-specific pipelines:"),D5.forEach(n),tv=d(Ig),E=r(Ig,"UL",{});var q=s(E);Od=r(q,"LI",{});var z5=s(Od);Ii=r(z5,"A",{href:!0});var I5=s(Ii);nv=i(I5,"AudioClassificationPipeline"),I5.forEach(n),z5.forEach(n),ov=d(q),Ud=r(q,"LI",{});var j5=s(Ud);ji=r(j5,"A",{href:!0});var S5=s(ji);rv=i(S5,"AutomaticSpeechRecognitionPipeline"),S5.forEach(n),j5.forEach(n),sv=d(q),Nd=r(q,"LI",{});var M5=s(Nd);Si=r(M5,"A",{href:!0});var F5=s(Si);av=i(F5,"ConversationalPipeline"),F5.forEach(n),M5.forEach(n),iv=d(q),Gd=r(q,"LI",{});var L5=s(Gd);Mi=r(L5,"A",{href:!0});var O5=s(Mi);lv=i(O5,"DocumentQuestionAnsweringPipeline"),O5.forEach(n),L5.forEach(n),dv=d(q),Qd=r(q,"LI",{});var U5=s(Qd);Fi=r(U5,"A",{href:!0});var N5=s(Fi);cv=i(N5,"FeatureExtractionPipeline"),N5.forEach(n),U5.forEach(n),pv=d(q),Rd=r(q,"LI",{});var G5=s(Rd);Li=r(G5,"A",{href:!0});var Q5=s(Li);mv=i(Q5,"FillMaskPipeline"),Q5.forEach(n),G5.forEach(n),fv=d(q),Vd=r(q,"LI",{});var R5=s(Vd);Oi=r(R5,"A",{href:!0});var V5=s(Oi);hv=i(V5,"ImageClassificationPipeline"),V5.forEach(n),R5.forEach(n),uv=d(q),Hd=r(q,"LI",{});var H5=s(Hd);Ui=r(H5,"A",{href:!0});var W5=s(Ui);gv=i(W5,"ImageSegmentationPipeline"),W5.forEach(n),H5.forEach(n),_v=d(q),Wd=r(q,"LI",{});var Z5=s(Wd);Ni=r(Z5,"A",{href:!0});var B5=s(Ni);bv=i(B5,"ImageToTextPipeline"),B5.forEach(n),Z5.forEach(n),vv=d(q),Zd=r(q,"LI",{});var Y5=s(Zd);Gi=r(Y5,"A",{href:!0});var 
X5=s(Gi);wv=i(X5,"ObjectDetectionPipeline"),X5.forEach(n),Y5.forEach(n),Tv=d(q),Bd=r(q,"LI",{});var K5=s(Bd);Qi=r(K5,"A",{href:!0});var J5=s(Qi);kv=i(J5,"QuestionAnsweringPipeline"),J5.forEach(n),K5.forEach(n),Pv=d(q),Yd=r(q,"LI",{});var e3=s(Yd);Ri=r(e3,"A",{href:!0});var t3=s(Ri);yv=i(t3,"SummarizationPipeline"),t3.forEach(n),e3.forEach(n),xv=d(q),Xd=r(q,"LI",{});var n3=s(Xd);Vi=r(n3,"A",{href:!0});var o3=s(Vi);$v=i(o3,"TableQuestionAnsweringPipeline"),o3.forEach(n),n3.forEach(n),Ev=d(q),Kd=r(q,"LI",{});var r3=s(Kd);Hi=r(r3,"A",{href:!0});var s3=s(Hi);qv=i(s3,"TextClassificationPipeline"),s3.forEach(n),r3.forEach(n),Av=d(q),Jd=r(q,"LI",{});var a3=s(Jd);Wi=r(a3,"A",{href:!0});var i3=s(Wi);Cv=i(i3,"TextGenerationPipeline"),i3.forEach(n),a3.forEach(n),Dv=d(q),ec=r(q,"LI",{});var l3=s(ec);Zi=r(l3,"A",{href:!0});var d3=s(Zi);zv=i(d3,"Text2TextGenerationPipeline"),d3.forEach(n),l3.forEach(n),Iv=d(q),tc=r(q,"LI",{});var c3=s(tc);Bi=r(c3,"A",{href:!0});var p3=s(Bi);jv=i(p3,"TokenClassificationPipeline"),p3.forEach(n),c3.forEach(n),Sv=d(q),nc=r(q,"LI",{});var m3=s(nc);Yi=r(m3,"A",{href:!0});var f3=s(Yi);Mv=i(f3,"TranslationPipeline"),f3.forEach(n),m3.forEach(n),Fv=d(q),oc=r(q,"LI",{});var h3=s(oc);Xi=r(h3,"A",{href:!0});var u3=s(Xi);Lv=i(u3,"VisualQuestionAnsweringPipeline"),u3.forEach(n),h3.forEach(n),Ov=d(q),rc=r(q,"LI",{});var g3=s(rc);Ki=r(g3,"A",{href:!0});var _3=s(Ki);Uv=i(_3,"ZeroShotClassificationPipeline"),_3.forEach(n),g3.forEach(n),Nv=d(q),sc=r(q,"LI",{});var b3=s(sc);Ji=r(b3,"A",{href:!0});var v3=s(Ji);Gv=i(v3,"ZeroShotImageClassificationPipeline"),v3.forEach(n),b3.forEach(n),Qv=d(q),ac=r(q,"LI",{});var w3=s(ac);el=r(w3,"A",{href:!0});var T3=s(el);Rv=i(T3,"ZeroShotObjectDetectionPipeline"),T3.forEach(n),w3.forEach(n),q.forEach(n),Ig.forEach(n),Ei.forEach(n),Hh=d(t),st=r(t,"H2",{class:!0});var jg=s(st);bn=r(jg,"A",{id:!0,class:!0,href:!0});var k3=s(bn);ic=r(k3,"SPAN",{});var P3=s(ic);u(ar.$$.fragment,P3),P3.forEach(n),k3.forEach(n),Vv=d(jg),lc=r(jg,"SPAN",{});var y3=s(lc);Hv=i(y3,"The pipeline abstraction"),y3.forEach(n),jg.forEach(n),Wh=d(t),vn=r(t,"P",{});var Sg=s(vn);Wv=i(Sg,"The "),dc=r(Sg,"EM",{});var x3=s(dc);Zv=i(x3,"pipeline"),x3.forEach(n),Bv=i(Sg,` abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life.`),Sg.forEach(n),Zh=d(t),tl=r(t,"P",{});var $3=s(tl);Yv=i($3,"Simple call on one item:"),$3.forEach(n),Bh=d(t),u(ir.$$.fragment,t),Yh=d(t),wn=r(t,"P",{});var Mg=s(wn);Xv=i(Mg,"If you want to use a specific model from the "),lr=r(Mg,"A",{href:!0,rel:!0});var E3=s(lr);Kv=i(E3,"hub"),E3.forEach(n),Jv=i(Mg,` you can ignore the task if the model on the hub already defines it:`),Mg.forEach(n),Xh=d(t),u(dr.$$.fragment,t),Kh=d(t),Tn=r(t,"P",{});var Fg=s(Tn);ew=i(Fg,"To call a pipeline on many items, you can either call with a "),cc=r(Fg,"EM",{});var q3=s(cc);tw=i(q3,"list"),q3.forEach(n),nw=i(Fg,"."),Fg.forEach(n),Jh=d(t),u(cr.$$.fragment,t),eu=d(t),kn=r(t,"P",{});var Lg=s(kn);ow=i(Lg,"To iterate of full datasets it is recommended to use a "),pc=r(Lg,"CODE",{});var A3=s(pc);rw=i(A3,"dataset"),A3.forEach(n),sw=i(Lg,` directly. This means you don\u2019t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. 
If it doesn\u2019t don\u2019t hesitate to create an issue.`),Lg.forEach(n),tu=d(t),u(pr.$$.fragment,t),nu=d(t),nl=r(t,"P",{});var C3=s(nl);aw=i(C3,"For ease of use, a generator is also possible:"),C3.forEach(n),ou=d(t),u(mr.$$.fragment,t),ru=d(t),X=r(t,"DIV",{class:!0});var Ge=s(X);u(fr.$$.fragment,Ge),iw=d(Ge),hr=r(Ge,"P",{});var Og=s(hr);lw=i(Og,"Utility factory method to build a "),ol=r(Og,"A",{href:!0});var D3=s(ol);dw=i(D3,"Pipeline"),D3.forEach(n),cw=i(Og,"."),Og.forEach(n),pw=d(Ge),mc=r(Ge,"P",{});var z3=s(mc);mw=i(z3,"Pipelines are made of:"),z3.forEach(n),fw=d(Ge),at=r(Ge,"UL",{});var Xl=s(at);ur=r(Xl,"LI",{});var Ug=s(ur);hw=i(Ug,"A "),rl=r(Ug,"A",{href:!0});var I3=s(rl);uw=i(I3,"tokenizer"),I3.forEach(n),gw=i(Ug," in charge of mapping raw textual input to token."),Ug.forEach(n),_w=d(Xl),gr=r(Xl,"LI",{});var Ng=s(gr);bw=i(Ng,"A "),sl=r(Ng,"A",{href:!0});var j3=s(sl);vw=i(j3,"model"),j3.forEach(n),ww=i(Ng," to make predictions from the inputs."),Ng.forEach(n),Tw=d(Xl),fc=r(Xl,"LI",{});var S3=s(fc);kw=i(S3,"Some (optional) post processing for enhancing model\u2019s output."),S3.forEach(n),Xl.forEach(n),Pw=d(Ge),u(Pn.$$.fragment,Ge),Ge.forEach(n),su=d(t),it=r(t,"H2",{class:!0});var Gg=s(it);yn=r(Gg,"A",{id:!0,class:!0,href:!0});var M3=s(yn);hc=r(M3,"SPAN",{});var F3=s(hc);u(_r.$$.fragment,F3),F3.forEach(n),M3.forEach(n),yw=d(Gg),uc=r(Gg,"SPAN",{});var L3=s(uc);xw=i(L3,"Pipeline batching"),L3.forEach(n),Gg.forEach(n),au=d(t),je=r(t,"P",{});var Kl=s(je);$w=i(Kl,`All pipelines can use batching. This will work whenever the pipeline uses its streaming ability (so when passing lists or `),gc=r(Kl,"CODE",{});var O3=s(gc);Ew=i(O3,"Dataset"),O3.forEach(n),qw=i(Kl," or "),_c=r(Kl,"CODE",{});var U3=s(_c);Aw=i(U3,"generator"),U3.forEach(n),Cw=i(Kl,")."),Kl.forEach(n),iu=d(t),u(br.$$.fragment,t),lu=d(t),u(xn.$$.fragment,t),du=d(t),u(vr.$$.fragment,t),cu=d(t),u(wr.$$.fragment,t),pu=d(t),al=r(t,"P",{});var N3=s(al);Dw=i(N3,"Example where it\u2019s most a slowdown:"),N3.forEach(n),mu=d(t),u(Tr.$$.fragment,t),fu=d(t),$n=r(t,"P",{});var Qg=s($n);zw=i(Qg,"This is a occasional very long sentence compared to the other. In that case, the "),bc=r(Qg,"STRONG",{});var G3=s(bc);Iw=i(G3,"whole"),G3.forEach(n),jw=i(Qg,` batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes.`),Qg.forEach(n),hu=d(t),u(kr.$$.fragment,t),uu=d(t),il=r(t,"P",{});var Q3=s(il);Sw=i(Q3,`There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:`),Q3.forEach(n),gu=d(t),ll=r(t,"P",{});var R3=s(ll);Mw=i(R3,"For users, a rule of thumb is:"),R3.forEach(n),_u=d(t),ce=r(t,"UL",{});var Qe=s(ce);vc=r(Qe,"LI",{});var V3=s(vc);wc=r(V3,"P",{});var H3=s(wc);Tc=r(H3,"STRONG",{});var W3=s(Tc);Fw=i(W3,`Measure performance on your load, with your hardware. Measure, measure, and keep measuring. 
Real numbers are the only way to go.`),W3.forEach(n),H3.forEach(n),V3.forEach(n),Lw=d(Qe),kc=r(Qe,"LI",{});var Z3=s(kc);Pc=r(Z3,"P",{});var B3=s(Pc);Ow=i(B3,"If you are latency constrained (live product doing inference), don\u2019t batch"),B3.forEach(n),Z3.forEach(n),Uw=d(Qe),yc=r(Qe,"LI",{});var Y3=s(yc);xc=r(Y3,"P",{});var X3=s(xc);Nw=i(X3,"If you are using CPU, don\u2019t batch."),X3.forEach(n),Y3.forEach(n),Gw=d(Qe),Pr=r(Qe,"LI",{});var Rg=s(Pr);$c=r(Rg,"P",{});var K3=s($c);Qw=i(K3,"If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:"),K3.forEach(n),Rw=d(Rg),lt=r(Rg,"UL",{});var Jl=s(lt);Ec=r(Jl,"LI",{});var J3=s(Ec);Vw=i(J3,`If you have no clue about the size of the sequence_length (\u201Cnatural\u201D data), by default don\u2019t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don\u2019t control the sequence_length.)`),J3.forEach(n),Hw=d(Jl),qc=r(Jl,"LI",{});var eq=s(qc);Ww=i(eq,`If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.`),eq.forEach(n),Zw=d(Jl),Ac=r(Jl,"LI",{});var tq=s(Ac);Bw=i(tq,"The larger the GPU the more likely batching is going to be more interesting"),tq.forEach(n),Jl.forEach(n),Rg.forEach(n),Yw=d(Qe),Cc=r(Qe,"LI",{});var nq=s(Cc);Dc=r(nq,"P",{});var oq=s(Dc);Xw=i(oq,"As soon as you enable batching, make sure you can handle OOMs nicely."),oq.forEach(n),nq.forEach(n),Qe.forEach(n),bu=d(t),dt=r(t,"H2",{class:!0});var Vg=s(dt);En=r(Vg,"A",{id:!0,class:!0,href:!0});var rq=s(En);zc=r(rq,"SPAN",{});var sq=s(zc);u(yr.$$.fragment,sq),sq.forEach(n),rq.forEach(n),Kw=d(Vg),Ic=r(Vg,"SPAN",{});var aq=s(Ic);Jw=i(aq,"Pipeline chunk batching"),aq.forEach(n),Vg.forEach(n),vu=d(t),qe=r(t,"P",{});var Ai=s(qe);jc=r(Ai,"CODE",{});var iq=s(jc);e1=i(iq,"zero-shot-classification"),iq.forEach(n),t1=i(Ai," and "),Sc=r(Ai,"CODE",{});var lq=s(Sc);n1=i(lq,"question-answering"),lq.forEach(n),o1=i(Ai,` are slightly specific in the sense, that a single input might yield multiple forward pass of a model. Under normal circumstances, this would yield issues with `),Mc=r(Ai,"CODE",{});var dq=s(Mc);r1=i(dq,"batch_size"),dq.forEach(n),s1=i(Ai," argument."),Ai.forEach(n),wu=d(t),Se=r(t,"P",{});var ed=s(Se);a1=i(ed,"In order to circumvent this issue, both of these pipelines are a bit specific, they are "),Fc=r(ed,"CODE",{});var cq=s(Fc);i1=i(cq,"ChunkPipeline"),cq.forEach(n),l1=i(ed,` instead of regular `),Lc=r(ed,"CODE",{});var pq=s(Lc);d1=i(pq,"Pipeline"),pq.forEach(n),c1=i(ed,". In short:"),ed.forEach(n),Tu=d(t),u(xr.$$.fragment,t),ku=d(t),dl=r(t,"P",{});var mq=s(dl);p1=i(mq,"Now becomes:"),mq.forEach(n),Pu=d(t),u($r.$$.fragment,t),yu=d(t),cl=r(t,"P",{});var fq=s(cl);m1=i(fq,`This should be very transparent to your code because the pipelines are used in the same way.`),fq.forEach(n),xu=d(t),qn=r(t,"P",{});var Hg=s(qn);f1=i(Hg,`This is a simplified view, since the pipeline can handle automatically the batch to ! Meaning you don\u2019t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the `),Oc=r(Hg,"CODE",{});var hq=s(Oc);h1=i(hq,"batch_size"),hq.forEach(n),u1=i(Hg,` independently of the inputs. 
The caveats from the previous section still apply.`),Hg.forEach(n),$u=d(t),ct=r(t,"H2",{class:!0});var Wg=s(ct);An=r(Wg,"A",{id:!0,class:!0,href:!0});var uq=s(An);Uc=r(uq,"SPAN",{});var gq=s(Uc);u(Er.$$.fragment,gq),gq.forEach(n),uq.forEach(n),g1=d(Wg),Nc=r(Wg,"SPAN",{});var _q=s(Nc);_1=i(_q,"Pipeline custom code"),_q.forEach(n),Wg.forEach(n),Eu=d(t),pl=r(t,"P",{});var bq=s(pl);b1=i(bq,"If you want to override a specific pipeline."),bq.forEach(n),qu=d(t),Cn=r(t,"P",{});var Zg=s(Cn);v1=i(Zg,`Don\u2019t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so `),Gc=r(Zg,"CODE",{});var vq=s(Gc);w1=i(vq,"transformers"),vq.forEach(n),T1=i(Zg," could maybe support your use case."),Zg.forEach(n),Au=d(t),ml=r(t,"P",{});var wq=s(ml);k1=i(wq,"If you want to try simply you can:"),wq.forEach(n),Cu=d(t),fl=r(t,"UL",{});var Tq=s(fl);Qc=r(Tq,"LI",{});var kq=s(Qc);P1=i(kq,"Subclass your pipeline of choice"),kq.forEach(n),Tq.forEach(n),Du=d(t),u(qr.$$.fragment,t),zu=d(t),hl=r(t,"P",{});var Pq=s(hl);y1=i(Pq,"That should enable you to do all the custom code you want."),Pq.forEach(n),Iu=d(t),pt=r(t,"H2",{class:!0});var Bg=s(pt);Dn=r(Bg,"A",{id:!0,class:!0,href:!0});var yq=s(Dn);Rc=r(yq,"SPAN",{});var xq=s(Rc);u(Ar.$$.fragment,xq),xq.forEach(n),yq.forEach(n),x1=d(Bg),Vc=r(Bg,"SPAN",{});var $q=s(Vc);$1=i($q,"Implementing a pipeline"),$q.forEach(n),Bg.forEach(n),ju=d(t),ul=r(t,"P",{});var Eq=s(ul);gl=r(Eq,"A",{href:!0});var qq=s(gl);E1=i(qq,"Implementing a new pipeline"),qq.forEach(n),Eq.forEach(n),Su=d(t),mt=r(t,"H2",{class:!0});var Yg=s(mt);zn=r(Yg,"A",{id:!0,class:!0,href:!0});var Aq=s(zn);Hc=r(Aq,"SPAN",{});var Cq=s(Hc);u(Cr.$$.fragment,Cq),Cq.forEach(n),Aq.forEach(n),q1=d(Yg),Wc=r(Yg,"SPAN",{});var Dq=s(Wc);A1=i(Dq,"The task specific pipelines"),Dq.forEach(n),Yg.forEach(n),Mu=d(t),ft=r(t,"H3",{class:!0});var Xg=s(ft);In=r(Xg,"A",{id:!0,class:!0,href:!0});var zq=s(In);Zc=r(zq,"SPAN",{});var Iq=s(Zc);u(Dr.$$.fragment,Iq),Iq.forEach(n),zq.forEach(n),C1=d(Xg),Bc=r(Xg,"SPAN",{});var jq=s(Bc);D1=i(jq,"AudioClassificationPipeline"),jq.forEach(n),Xg.forEach(n),Fu=d(t),K=r(t,"DIV",{class:!0});var Re=s(K);u(zr.$$.fragment,Re),z1=d(Re),Ir=r(Re,"P",{});var Kg=s(Ir);I1=i(Kg,"Audio classification pipeline using any "),Yc=r(Kg,"CODE",{});var Sq=s(Yc);j1=i(Sq,"AutoModelForAudioClassification"),Sq.forEach(n),S1=i(Kg,`. This pipeline predicts the class of a raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio formats.`),Kg.forEach(n),M1=d(Re),ht=r(Re,"P",{});var td=s(ht);F1=i(td,"This pipeline can currently be loaded from "),_l=r(td,"A",{href:!0});var Mq=s(_l);L1=i(Mq,"pipeline()"),Mq.forEach(n),O1=i(td,` using the following task identifier: `),Xc=r(td,"CODE",{});var Fq=s(Xc);U1=i(Fq,'"audio-classification"'),Fq.forEach(n),N1=i(td,"."),td.forEach(n),G1=d(Re),jr=r(Re,"P",{});var Jg=s(jr);Q1=i(Jg,`See the list of available models on `),Sr=r(Jg,"A",{href:!0,rel:!0});var Lq=s(Sr);R1=i(Lq,"huggingface.co/models"),Lq.forEach(n),V1=i(Jg,"."),Jg.forEach(n),H1=d(Re),jn=r(Re,"DIV",{class:!0});var e_=s(jn);u(Mr.$$.fragment,e_),W1=d(e_),Fr=r(e_,"P",{});var t_=s(Fr);Z1=i(t_,"Classify the sequence(s) given as inputs. 
See the "),bl=r(t_,"A",{href:!0});var Oq=s(bl);B1=i(Oq,"AutomaticSpeechRecognitionPipeline"),Oq.forEach(n),Y1=i(t_,` documentation for more information.`),t_.forEach(n),e_.forEach(n),Re.forEach(n),Lu=d(t),ut=r(t,"H3",{class:!0});var n_=s(ut);Sn=r(n_,"A",{id:!0,class:!0,href:!0});var Uq=s(Sn);Kc=r(Uq,"SPAN",{});var Nq=s(Kc);u(Lr.$$.fragment,Nq),Nq.forEach(n),Uq.forEach(n),X1=d(n_),Jc=r(n_,"SPAN",{});var Gq=s(Jc);K1=i(Gq,"AutomaticSpeechRecognitionPipeline"),Gq.forEach(n),n_.forEach(n),Ou=d(t),ge=r(t,"DIV",{class:!0});var Xo=s(ge);u(Or.$$.fragment,Xo),J1=d(Xo),ep=r(Xo,"P",{});var Qq=s(ep);e2=i(Qq,"Pipeline that aims at extracting spoken text contained within some audio."),Qq.forEach(n),t2=d(Xo),tp=r(Xo,"P",{});var Rq=s(tp);n2=i(Rq,`The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for to support multiple audio formats`),Rq.forEach(n),o2=d(Xo),Mn=r(Xo,"DIV",{class:!0});var o_=s(Mn);u(Ur.$$.fragment,o_),r2=d(o_),Nr=r(o_,"P",{});var r_=s(Nr);s2=i(r_,"Classify the sequence(s) given as inputs. See the "),vl=r(r_,"A",{href:!0});var Vq=s(vl);a2=i(Vq,"AutomaticSpeechRecognitionPipeline"),Vq.forEach(n),i2=i(r_,` documentation for more information.`),r_.forEach(n),o_.forEach(n),Xo.forEach(n),Uu=d(t),gt=r(t,"H3",{class:!0});var s_=s(gt);Fn=r(s_,"A",{id:!0,class:!0,href:!0});var Hq=s(Fn);np=r(Hq,"SPAN",{});var Wq=s(np);u(Gr.$$.fragment,Wq),Wq.forEach(n),Hq.forEach(n),l2=d(s_),op=r(s_,"SPAN",{});var Zq=s(op);d2=i(Zq,"ConversationalPipeline"),Zq.forEach(n),s_.forEach(n),Nu=d(t),M=r(t,"DIV",{class:!0});var pe=s(M);u(Qr.$$.fragment,pe),c2=d(pe),Ae=r(pe,"P",{});var Ko=s(Ae);p2=i(Ko,`Utility class containing a conversation and its history. This class is meant to be used as an input to the `),wl=r(Ko,"A",{href:!0});var Bq=s(wl);m2=i(Bq,"ConversationalPipeline"),Bq.forEach(n),f2=i(Ko,`. The conversation contains a number of utility function to manage the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input before being passed to the `),Tl=r(Ko,"A",{href:!0});var Yq=s(Tl);h2=i(Yq,"ConversationalPipeline"),Yq.forEach(n),u2=i(Ko,`. This user input is either created when the class is instantiated, or by calling `),rp=r(Ko,"CODE",{});var Xq=s(rp);g2=i(Xq,'conversational_pipeline.append_response("input")'),Xq.forEach(n),_2=i(Ko," after a conversation turn."),Ko.forEach(n),b2=d(pe),u(Ln.$$.fragment,pe),v2=d(pe),On=r(pe,"DIV",{class:!0});var a_=s(On);u(Rr.$$.fragment,a_),w2=d(a_),Vr=r(a_,"P",{});var i_=s(Vr);T2=i(i_,"Add a user input to the conversation for the next round. This populates the internal "),sp=r(i_,"CODE",{});var Kq=s(sp);k2=i(Kq,"new_user_input"),Kq.forEach(n),P2=i(i_," field."),i_.forEach(n),a_.forEach(n),y2=d(pe),Un=r(pe,"DIV",{class:!0});var l_=s(Un);u(Hr.$$.fragment,l_),x2=d(l_),ap=r(l_,"P",{});var Jq=s(ap);$2=i(Jq,"Append a response to the list of generated responses."),Jq.forEach(n),l_.forEach(n),E2=d(pe),Me=r(pe,"DIV",{class:!0});var nd=s(Me);u(Wr.$$.fragment,nd),q2=d(nd),ip=r(nd,"P",{});var eA=s(ip);A2=i(eA,"Iterates over all blobs of the conversation."),eA.forEach(n),C2=d(nd),_e=r(nd,"P",{});var Ve=s(_e);D2=i(Ve,"Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. 
"),lp=r(Ve,"CODE",{});var tA=s(lp);z2=i(tA,"is_user"),tA.forEach(n),I2=i(Ve," is a "),dp=r(Ve,"CODE",{});var nA=s(dp);j2=i(nA,"bool"),nA.forEach(n),S2=i(Ve,`, `),cp=r(Ve,"CODE",{});var oA=s(cp);M2=i(oA,"text_chunks"),oA.forEach(n),F2=i(Ve," is a "),pp=r(Ve,"CODE",{});var rA=s(pp);L2=i(rA,"str"),rA.forEach(n),O2=i(Ve,"."),Ve.forEach(n),nd.forEach(n),U2=d(pe),Nn=r(pe,"DIV",{class:!0});var d_=s(Nn);u(Zr.$$.fragment,d_),N2=d(d_),Ce=r(d_,"P",{});var Jo=s(Ce);G2=i(Jo,"Mark the conversation as processed (moves the content of "),mp=r(Jo,"CODE",{});var sA=s(mp);Q2=i(sA,"new_user_input"),sA.forEach(n),R2=i(Jo," to "),fp=r(Jo,"CODE",{});var aA=s(fp);V2=i(aA,"past_user_inputs"),aA.forEach(n),H2=i(Jo,`) and empties the `),hp=r(Jo,"CODE",{});var iA=s(hp);W2=i(iA,"new_user_input"),iA.forEach(n),Z2=i(Jo," field."),Jo.forEach(n),d_.forEach(n),pe.forEach(n),Gu=d(t),G=r(t,"DIV",{class:!0});var Te=s(G);u(Br.$$.fragment,Te),B2=d(Te),up=r(Te,"P",{});var lA=s(up);Y2=i(lA,"Multi-turn conversational pipeline."),lA.forEach(n),X2=d(Te),_t=r(Te,"P",{});var od=s(_t);K2=i(od,"This conversational pipeline can currently be loaded from "),kl=r(od,"A",{href:!0});var dA=s(kl);J2=i(dA,"pipeline()"),dA.forEach(n),eT=i(od,` using the following task identifier: `),gp=r(od,"CODE",{});var cA=s(gp);tT=i(cA,'"conversational"'),cA.forEach(n),nT=i(od,"."),od.forEach(n),oT=d(Te),be=r(Te,"P",{});var He=s(be);rT=i(He,`The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: `),_p=r(He,"EM",{});var pA=s(_p);sT=i(pA,"\u2018microsoft/DialoGPT-small\u2019"),pA.forEach(n),aT=i(He,", "),bp=r(He,"EM",{});var mA=s(bp);iT=i(mA,"\u2018microsoft/DialoGPT-medium\u2019"),mA.forEach(n),lT=i(He,", "),vp=r(He,"EM",{});var fA=s(vp);dT=i(fA,"\u2018microsoft/DialoGPT-large\u2019"),fA.forEach(n),cT=i(He,`. See the up-to-date list of available models on `),Yr=r(He,"A",{href:!0,rel:!0});var hA=s(Yr);pT=i(hA,"huggingface.co/models"),hA.forEach(n),mT=i(He,"."),He.forEach(n),fT=d(Te),u(Gn.$$.fragment,Te),hT=d(Te),Qn=r(Te,"DIV",{class:!0});var c_=s(Qn);u(Xr.$$.fragment,c_),uT=d(c_),wp=r(c_,"P",{});var uA=s(wp);gT=i(uA,"Generate responses for the conversation(s) given as inputs."),uA.forEach(n),c_.forEach(n),Te.forEach(n),Qu=d(t),bt=r(t,"H3",{class:!0});var p_=s(bt);Rn=r(p_,"A",{id:!0,class:!0,href:!0});var gA=s(Rn);Tp=r(gA,"SPAN",{});var _A=s(Tp);u(Kr.$$.fragment,_A),_A.forEach(n),gA.forEach(n),_T=d(p_),kp=r(p_,"SPAN",{});var bA=s(kp);bT=i(bA,"DocumentQuestionAnsweringPipeline"),bA.forEach(n),p_.forEach(n),Ru=d(t),J=r(t,"DIV",{class:!0});var We=s(J);u(Jr.$$.fragment,We),vT=d(We),es=r(We,"P",{});var m_=s(es);wT=i(m_,"Document Question Answering pipeline using any "),Pp=r(m_,"CODE",{});var vA=s(Pp);TT=i(vA,"AutoModelForDocumentQuestionAnswering"),vA.forEach(n),kT=i(m_,`. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR\u2019d words/boxes) as input instead of text context.`),m_.forEach(n),PT=d(We),vt=r(We,"P",{});var rd=s(vt);yT=i(rd,"This document question answering pipeline can currently be loaded from "),Pl=r(rd,"A",{href:!0});var wA=s(Pl);xT=i(wA,"pipeline()"),wA.forEach(n),$T=i(rd,` using the following task identifier: `),yp=r(rd,"CODE",{});var TA=s(yp);ET=i(TA,'"document-question-answering"'),TA.forEach(n),qT=i(rd,"."),rd.forEach(n),AT=d(We),ts=r(We,"P",{});var f_=s(ts);CT=i(f_,`The models that this pipeline can use are models that have been fine-tuned on a document question answering task. 
See the up-to-date list of available models on `),ns=r(f_,"A",{href:!0,rel:!0});var kA=s(ns);DT=i(kA,"huggingface.co/models"),kA.forEach(n),zT=i(f_,"."),f_.forEach(n),IT=d(We),we=r(We,"DIV",{class:!0});var er=s(we);u(os.$$.fragment,er),jT=d(er),rs=r(er,"P",{});var h_=s(rs);ST=i(h_,`Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `),xp=r(h_,"CODE",{});var PA=s(xp);MT=i(PA,"word_boxes"),PA.forEach(n),FT=i(h_,` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run.`),h_.forEach(n),LT=d(er),$p=r(er,"P",{});var yA=s($p);OT=i(yA,"You can invoke the pipeline several ways:"),yA.forEach(n),UT=d(er),De=r(er,"UL",{});var tr=s(De);Ep=r(tr,"LI",{});var xA=s(Ep);qp=r(xA,"CODE",{});var $A=s(qp);NT=i($A,"pipeline(image=image, question=question)"),$A.forEach(n),xA.forEach(n),GT=d(tr),Ap=r(tr,"LI",{});var EA=s(Ap);Cp=r(EA,"CODE",{});var qA=s(Cp);QT=i(qA,"pipeline(image=image, question=question, word_boxes=word_boxes)"),qA.forEach(n),EA.forEach(n),RT=d(tr),Dp=r(tr,"LI",{});var AA=s(Dp);zp=r(AA,"CODE",{});var CA=s(zp);VT=i(CA,'pipeline([{"image": image, "question": question}])'),CA.forEach(n),AA.forEach(n),HT=d(tr),Ip=r(tr,"LI",{});var DA=s(Ip);jp=r(DA,"CODE",{});var zA=s(jp);WT=i(zA,'pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])'),zA.forEach(n),DA.forEach(n),tr.forEach(n),er.forEach(n),We.forEach(n),Vu=d(t),wt=r(t,"H3",{class:!0});var u_=s(wt);Vn=r(u_,"A",{id:!0,class:!0,href:!0});var IA=s(Vn);Sp=r(IA,"SPAN",{});var jA=s(Sp);u(ss.$$.fragment,jA),jA.forEach(n),IA.forEach(n),ZT=d(u_),Mp=r(u_,"SPAN",{});var SA=s(Mp);BT=i(SA,"FeatureExtractionPipeline"),SA.forEach(n),u_.forEach(n),Hu=d(t),ee=r(t,"DIV",{class:!0});var Ze=s(ee);u(as.$$.fragment,Ze),YT=d(Ze),Fp=r(Ze,"P",{});var MA=s(Fp);XT=i(MA,`Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.`),MA.forEach(n),KT=d(Ze),Tt=r(Ze,"P",{});var sd=s(Tt);JT=i(sd,"This feature extraction pipeline can currently be loaded from "),yl=r(sd,"A",{href:!0});var FA=s(yl);ek=i(FA,"pipeline()"),FA.forEach(n),tk=i(sd,` using the task identifier: `),Lp=r(sd,"CODE",{});var LA=s(Lp);nk=i(LA,'"feature-extraction"'),LA.forEach(n),ok=i(sd,"."),sd.forEach(n),rk=d(Ze),is=r(Ze,"P",{});var g_=s(is);sk=i(g_,`All models may be used for this pipeline. See a list of all models, including community-contributed models on `),ls=r(g_,"A",{href:!0,rel:!0});var OA=s(ls);ak=i(OA,"huggingface.co/models"),OA.forEach(n),ik=i(g_,"."),g_.forEach(n),lk=d(Ze),Hn=r(Ze,"DIV",{class:!0});var __=s(Hn);u(ds.$$.fragment,__),dk=d(__),Op=r(__,"P",{});var UA=s(Op);ck=i(UA,"Extract the features of the input(s)."),UA.forEach(n),__.forEach(n),Ze.forEach(n),Wu=d(t),kt=r(t,"H3",{class:!0});var b_=s(kt);Wn=r(b_,"A",{id:!0,class:!0,href:!0});var NA=s(Wn);Up=r(NA,"SPAN",{});var GA=s(Up);u(cs.$$.fragment,GA),GA.forEach(n),NA.forEach(n),pk=d(b_),Np=r(b_,"SPAN",{});var QA=s(Np);mk=i(QA,"FillMaskPipeline"),QA.forEach(n),b_.forEach(n),Zu=d(t),Q=r(t,"DIV",{class:!0});var ke=s(Q);u(ps.$$.fragment,ke),fk=d(ke),Pt=r(ke,"P",{});var ad=s(Pt);hk=i(ad,"Masked language modeling prediction pipeline using any "),Gp=r(ad,"CODE",{});var RA=s(Gp);uk=i(RA,"ModelWithLMHead"),RA.forEach(n),gk=i(ad,". 
See the "),xl=r(ad,"A",{href:!0});var VA=s(xl);_k=i(VA,`masked language modeling examples`),VA.forEach(n),bk=i(ad," for more information."),ad.forEach(n),vk=d(ke),yt=r(ke,"P",{});var id=s(yt);wk=i(id,"This mask filling pipeline can currently be loaded from "),$l=r(id,"A",{href:!0});var HA=s($l);Tk=i(HA,"pipeline()"),HA.forEach(n),kk=i(id,` using the following task identifier: `),Qp=r(id,"CODE",{});var WA=s(Qp);Pk=i(WA,'"fill-mask"'),WA.forEach(n),yk=i(id,"."),id.forEach(n),xk=d(ke),ms=r(ke,"P",{});var v_=s(ms);$k=i(v_,`The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. See the up-to-date list of available models on `),fs=r(v_,"A",{href:!0,rel:!0});var ZA=s(fs);Ek=i(ZA,"huggingface.co/models"),ZA.forEach(n),qk=i(v_,"."),v_.forEach(n),Ak=d(ke),u(Zn.$$.fragment,ke),Ck=d(ke),Bn=r(ke,"DIV",{class:!0});var w_=s(Bn);u(hs.$$.fragment,w_),Dk=d(w_),Rp=r(w_,"P",{});var BA=s(Rp);zk=i(BA,"Fill the masked token in the text(s) given as inputs."),BA.forEach(n),w_.forEach(n),ke.forEach(n),Bu=d(t),xt=r(t,"H3",{class:!0});var T_=s(xt);Yn=r(T_,"A",{id:!0,class:!0,href:!0});var YA=s(Yn);Vp=r(YA,"SPAN",{});var XA=s(Vp);u(us.$$.fragment,XA),XA.forEach(n),YA.forEach(n),Ik=d(T_),Hp=r(T_,"SPAN",{});var KA=s(Hp);jk=i(KA,"ImageClassificationPipeline"),KA.forEach(n),T_.forEach(n),Yu=d(t),te=r(t,"DIV",{class:!0});var Be=s(te);u(gs.$$.fragment,Be),Sk=d(Be),_s=r(Be,"P",{});var k_=s(_s);Mk=i(k_,"Image classification pipeline using any "),Wp=r(k_,"CODE",{});var JA=s(Wp);Fk=i(JA,"AutoModelForImageClassification"),JA.forEach(n),Lk=i(k_,`. This pipeline predicts the class of an image.`),k_.forEach(n),Ok=d(Be),$t=r(Be,"P",{});var ld=s($t);Uk=i(ld,"This image classification pipeline can currently be loaded from "),El=r(ld,"A",{href:!0});var e6=s(El);Nk=i(e6,"pipeline()"),e6.forEach(n),Gk=i(ld,` using the following task identifier: `),Zp=r(ld,"CODE",{});var t6=s(Zp);Qk=i(t6,'"image-classification"'),t6.forEach(n),Rk=i(ld,"."),ld.forEach(n),Vk=d(Be),bs=r(Be,"P",{});var P_=s(bs);Hk=i(P_,`See the list of available models on `),vs=r(P_,"A",{href:!0,rel:!0});var n6=s(vs);Wk=i(n6,"huggingface.co/models"),n6.forEach(n),Zk=i(P_,"."),P_.forEach(n),Bk=d(Be),Xn=r(Be,"DIV",{class:!0});var y_=s(Xn);u(ws.$$.fragment,y_),Yk=d(y_),Bp=r(y_,"P",{});var o6=s(Bp);Xk=i(o6,"Assign labels to the image(s) passed as inputs."),o6.forEach(n),y_.forEach(n),Be.forEach(n),Xu=d(t),Et=r(t,"H3",{class:!0});var x_=s(Et);Kn=r(x_,"A",{id:!0,class:!0,href:!0});var r6=s(Kn);Yp=r(r6,"SPAN",{});var s6=s(Yp);u(Ts.$$.fragment,s6),s6.forEach(n),r6.forEach(n),Kk=d(x_),Xp=r(x_,"SPAN",{});var a6=s(Xp);Jk=i(a6,"ImageSegmentationPipeline"),a6.forEach(n),x_.forEach(n),Ku=d(t),ne=r(t,"DIV",{class:!0});var Ye=s(ne);u(ks.$$.fragment,Ye),eP=d(Ye),Ps=r(Ye,"P",{});var $_=s(Ps);tP=i($_,"Image segmentation pipeline using any "),Kp=r($_,"CODE",{});var i6=s(Kp);nP=i(i6,"AutoModelForXXXSegmentation"),i6.forEach(n),oP=i($_,`. 
This pipeline predicts masks of objects and their classes.`),$_.forEach(n),rP=d(Ye),qt=r(Ye,"P",{});var dd=s(qt);sP=i(dd,"This image segmentation pipeline can currently be loaded from "),ql=r(dd,"A",{href:!0});var l6=s(ql);aP=i(l6,"pipeline()"),l6.forEach(n),iP=i(dd,` using the following task identifier: `),Jp=r(dd,"CODE",{});var d6=s(Jp);lP=i(d6,'"image-segmentation"'),d6.forEach(n),dP=i(dd,"."),dd.forEach(n),cP=d(Ye),ys=r(Ye,"P",{});var E_=s(ys);pP=i(E_,`See the list of available models on `),xs=r(E_,"A",{href:!0,rel:!0});var c6=s(xs);mP=i(c6,"huggingface.co/models"),c6.forEach(n),fP=i(E_,"."),E_.forEach(n),hP=d(Ye),Jn=r(Ye,"DIV",{class:!0});var q_=s(Jn);u($s.$$.fragment,q_),uP=d(q_),em=r(q_,"P",{});var p6=s(em);gP=i(p6,"Perform segmentation (detect masks & classes) in the image(s) passed as inputs."),p6.forEach(n),q_.forEach(n),Ye.forEach(n),Ju=d(t),At=r(t,"H3",{class:!0});var A_=s(At);eo=r(A_,"A",{id:!0,class:!0,href:!0});var m6=s(eo);tm=r(m6,"SPAN",{});var f6=s(tm);u(Es.$$.fragment,f6),f6.forEach(n),m6.forEach(n),_P=d(A_),nm=r(A_,"SPAN",{});var h6=s(nm);bP=i(h6,"ImageToTextPipeline"),h6.forEach(n),A_.forEach(n),eg=d(t),oe=r(t,"DIV",{class:!0});var Xe=s(oe);u(qs.$$.fragment,Xe),vP=d(Xe),As=r(Xe,"P",{});var C_=s(As);wP=i(C_,"Image To Text pipeline using a "),om=r(C_,"CODE",{});var u6=s(om);TP=i(u6,"AutoModelForVision2Seq"),u6.forEach(n),kP=i(C_,". This pipeline predicts a caption for a given image."),C_.forEach(n),PP=d(Xe),rm=r(Xe,"P",{});var g6=s(rm);yP=i(g6,`This image to text pipeline can currently be loaded from pipeline() using the following task identifier: \u201Cimage-to-text\u201D.`),g6.forEach(n),xP=d(Xe),Cs=r(Xe,"P",{});var D_=s(Cs);$P=i(D_,`See the list of available models on `),Ds=r(D_,"A",{href:!0,rel:!0});var _6=s(Ds);EP=i(_6,"huggingface.co/models"),_6.forEach(n),qP=i(D_,"."),D_.forEach(n),AP=d(Xe),to=r(Xe,"DIV",{class:!0});var z_=s(to);u(zs.$$.fragment,z_),CP=d(z_),sm=r(z_,"P",{});var b6=s(sm);DP=i(b6,"Assign labels to the image(s) passed as inputs."),b6.forEach(n),z_.forEach(n),Xe.forEach(n),tg=d(t),Ct=r(t,"H3",{class:!0});var I_=s(Ct);no=r(I_,"A",{id:!0,class:!0,href:!0});var v6=s(no);am=r(v6,"SPAN",{});var w6=s(am);u(Is.$$.fragment,w6),w6.forEach(n),v6.forEach(n),zP=d(I_),im=r(I_,"SPAN",{});var T6=s(im);IP=i(T6,"NerPipeline"),T6.forEach(n),I_.forEach(n),ng=d(t),S=r(t,"DIV",{class:!0});var B=s(S);u(js.$$.fragment,B),jP=d(B),Dt=r(B,"P",{});var cd=s(Dt);SP=i(cd,"Named Entity Recognition pipeline using any "),lm=r(cd,"CODE",{});var k6=s(lm);MP=i(k6,"ModelForTokenClassification"),k6.forEach(n),FP=i(cd,". See the "),Al=r(cd,"A",{href:!0});var P6=s(Al);LP=i(P6,`named entity recognition examples`),P6.forEach(n),OP=i(cd," for more information."),cd.forEach(n),UP=d(B),zt=r(B,"P",{});var pd=s(zt);NP=i(pd,"This token recognition pipeline can currently be loaded from "),Cl=r(pd,"A",{href:!0});var y6=s(Cl);GP=i(y6,"pipeline()"),y6.forEach(n),QP=i(pd,` using the following task identifier: `),dm=r(pd,"CODE",{});var x6=s(dm);RP=i(x6,'"ner"'),x6.forEach(n),VP=i(pd," (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),pd.forEach(n),HP=d(B),Ss=r(B,"P",{});var j_=s(Ss);WP=i(j_,`The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
See the up-to-date list of available models on `),Ms=r(j_,"A",{href:!0,rel:!0});var $6=s(Ms);ZP=i($6,"huggingface.co/models"),$6.forEach(n),BP=i(j_,"."),j_.forEach(n),YP=d(B),Fe=r(B,"DIV",{class:!0});var md=s(Fe);u(Fs.$$.fragment,md),XP=d(md),cm=r(md,"P",{});var E6=s(cm);KP=i(E6,"Override tokens from a given word that disagree to force agreement on word boundaries."),E6.forEach(n),JP=d(md),pm=r(md,"P",{});var q6=s(pm);ey=i(q6,`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),q6.forEach(n),md.forEach(n),ty=d(B),oo=r(B,"DIV",{class:!0});var S_=s(oo);u(Ls.$$.fragment,S_),ny=d(S_),mm=r(S_,"P",{});var A6=s(mm);oy=i(A6,"Fuse various numpy arrays into dicts with all the information needed for aggregation"),A6.forEach(n),S_.forEach(n),ry=d(B),ro=r(B,"DIV",{class:!0});var M_=s(ro);u(Os.$$.fragment,M_),sy=d(M_),fm=r(M_,"P",{});var C6=s(fm);ay=i(C6,"Find and group together the adjacent tokens with the same entity predicted."),C6.forEach(n),M_.forEach(n),iy=d(B),so=r(B,"DIV",{class:!0});var F_=s(so);u(Us.$$.fragment,F_),ly=d(F_),hm=r(F_,"P",{});var D6=s(hm);dy=i(D6,"Group together the adjacent tokens with the same entity predicted."),D6.forEach(n),F_.forEach(n),B.forEach(n),og=d(t),ao=r(t,"P",{});var L_=s(ao);cy=i(L_,"See "),Dl=r(L_,"A",{href:!0});var z6=s(Dl);py=i(z6,"TokenClassificationPipeline"),z6.forEach(n),my=i(L_," for all details."),L_.forEach(n),rg=d(t),It=r(t,"H3",{class:!0});var O_=s(It);io=r(O_,"A",{id:!0,class:!0,href:!0});var I6=s(io);um=r(I6,"SPAN",{});var j6=s(um);u(Ns.$$.fragment,j6),j6.forEach(n),I6.forEach(n),fy=d(O_),gm=r(O_,"SPAN",{});var S6=s(gm);hy=i(S6,"ObjectDetectionPipeline"),S6.forEach(n),O_.forEach(n),sg=d(t),re=r(t,"DIV",{class:!0});var Ke=s(re);u(Gs.$$.fragment,Ke),uy=d(Ke),Qs=r(Ke,"P",{});var U_=s(Qs);gy=i(U_,"Object detection pipeline using any "),_m=r(U_,"CODE",{});var M6=s(_m);_y=i(M6,"AutoModelForObjectDetection"),M6.forEach(n),by=i(U_,`. This pipeline predicts bounding boxes of objects and their classes.`),U_.forEach(n),vy=d(Ke),jt=r(Ke,"P",{});var fd=s(jt);wy=i(fd,"This object detection pipeline can currently be loaded from "),zl=r(fd,"A",{href:!0});var F6=s(zl);Ty=i(F6,"pipeline()"),F6.forEach(n),ky=i(fd,` using the following task identifier: `),bm=r(fd,"CODE",{});var L6=s(bm);Py=i(L6,'"object-detection"'),L6.forEach(n),yy=i(fd,"."),fd.forEach(n),xy=d(Ke),Rs=r(Ke,"P",{});var N_=s(Rs);$y=i(N_,"See the list of available models on "),Vs=r(N_,"A",{href:!0,rel:!0});var O6=s(Vs);Ey=i(O6,"huggingface.co/models"),O6.forEach(n),qy=i(N_,"."),N_.forEach(n),Ay=d(Ke),lo=r(Ke,"DIV",{class:!0});var G_=s(lo);u(Hs.$$.fragment,G_),Cy=d(G_),vm=r(G_,"P",{});var U6=s(vm);Dy=i(U6,"Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),U6.forEach(n),G_.forEach(n),Ke.forEach(n),ag=d(t),St=r(t,"H3",{class:!0});var Q_=s(St);co=r(Q_,"A",{id:!0,class:!0,href:!0});var N6=s(co);wm=r(N6,"SPAN",{});var G6=s(wm);u(Ws.$$.fragment,G6),G6.forEach(n),N6.forEach(n),zy=d(Q_),Tm=r(Q_,"SPAN",{});var Q6=s(Tm);Iy=i(Q6,"QuestionAnsweringPipeline"),Q6.forEach(n),Q_.forEach(n),ig=d(t),F=r(t,"DIV",{class:!0});var me=s(F);u(Zs.$$.fragment,me),jy=d(me),Mt=r(me,"P",{});var hd=s(Mt);Sy=i(hd,"Question Answering pipeline using any "),km=r(hd,"CODE",{});var R6=s(km);My=i(R6,"ModelForQuestionAnswering"),R6.forEach(n),Fy=i(hd,". 
See the "),Il=r(hd,"A",{href:!0});var V6=s(Il);Ly=i(V6,`question answering examples`),V6.forEach(n),Oy=i(hd," for more information."),hd.forEach(n),Uy=d(me),Ft=r(me,"P",{});var ud=s(Ft);Ny=i(ud,"This question answering pipeline can currently be loaded from "),jl=r(ud,"A",{href:!0});var H6=s(jl);Gy=i(H6,"pipeline()"),H6.forEach(n),Qy=i(ud,` using the following task identifier: `),Pm=r(ud,"CODE",{});var W6=s(Pm);Ry=i(W6,'"question-answering"'),W6.forEach(n),Vy=i(ud,"."),ud.forEach(n),Hy=d(me),Bs=r(me,"P",{});var R_=s(Bs);Wy=i(R_,`The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on `),Ys=r(R_,"A",{href:!0,rel:!0});var Z6=s(Ys);Zy=i(Z6,"huggingface.co/models"),Z6.forEach(n),By=i(R_,"."),R_.forEach(n),Yy=d(me),po=r(me,"DIV",{class:!0});var V_=s(po);u(Xs.$$.fragment,V_),Xy=d(V_),ym=r(V_,"P",{});var B6=s(ym);Ky=i(B6,"Answer the question(s) given as inputs by using the context(s)."),B6.forEach(n),V_.forEach(n),Jy=d(me),Le=r(me,"DIV",{class:!0});var gd=s(Le);u(Ks.$$.fragment,gd),e0=d(gd),Lt=r(gd,"P",{});var _d=s(Lt);t0=i(_d,"QuestionAnsweringPipeline leverages the "),xm=r(_d,"CODE",{});var Y6=s(xm);n0=i(Y6,"SquadExample"),Y6.forEach(n),o0=i(_d,` internally. This helper method encapsulate all the logic for converting question(s) and context(s) to `),$m=r(_d,"CODE",{});var X6=s($m);r0=i(X6,"SquadExample"),X6.forEach(n),s0=i(_d,"."),_d.forEach(n),a0=d(gd),Em=r(gd,"P",{});var K6=s(Em);i0=i(K6,"We currently support extractive question answering."),K6.forEach(n),gd.forEach(n),l0=d(me),mo=r(me,"DIV",{class:!0});var H_=s(mo);u(Js.$$.fragment,H_),d0=d(H_),qm=r(H_,"P",{});var J6=s(qm);c0=i(J6,"When decoding from token probabilities, this method maps token indexes to actual word in the initial context."),J6.forEach(n),H_.forEach(n),me.forEach(n),lg=d(t),Ot=r(t,"H3",{class:!0});var W_=s(Ot);fo=r(W_,"A",{id:!0,class:!0,href:!0});var e8=s(fo);Am=r(e8,"SPAN",{});var t8=s(Am);u(ea.$$.fragment,t8),t8.forEach(n),e8.forEach(n),p0=d(W_),Cm=r(W_,"SPAN",{});var n8=s(Cm);m0=i(n8,"SummarizationPipeline"),n8.forEach(n),W_.forEach(n),dg=d(t),R=r(t,"DIV",{class:!0});var Pe=s(R);u(ta.$$.fragment,Pe),f0=d(Pe),Dm=r(Pe,"P",{});var o8=s(Dm);h0=i(o8,"Summarize news articles and other documents."),o8.forEach(n),u0=d(Pe),Ut=r(Pe,"P",{});var bd=s(Ut);g0=i(bd,"This summarizing pipeline can currently be loaded from "),Sl=r(bd,"A",{href:!0});var r8=s(Sl);_0=i(r8,"pipeline()"),r8.forEach(n),b0=i(bd,` using the following task identifier: `),zm=r(bd,"CODE",{});var s8=s(zm);v0=i(s8,'"summarization"'),s8.forEach(n),w0=i(bd,"."),bd.forEach(n),T0=d(Pe),L=r(Pe,"P",{});var Y=s(L);k0=i(Y,`The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, \u2019`),Im=r(Y,"EM",{});var a8=s(Im);P0=i(a8,"bart-large-cnn"),a8.forEach(n),y0=i(Y,"\u2019, \u2019"),jm=r(Y,"EM",{});var i8=s(jm);x0=i(i8,"t5-small"),i8.forEach(n),$0=i(Y,"\u2019, \u2019"),Sm=r(Y,"EM",{});var l8=s(Sm);E0=i(l8,"t5-base"),l8.forEach(n),q0=i(Y,"\u2019, \u2019"),Mm=r(Y,"EM",{});var d8=s(Mm);A0=i(d8,"t5-large"),d8.forEach(n),C0=i(Y,"\u2019, \u2019"),Fm=r(Y,"EM",{});var c8=s(Fm);D0=i(c8,"t5-3b"),c8.forEach(n),z0=i(Y,"\u2019, \u2019"),Lm=r(Y,"EM",{});var p8=s(Lm);I0=i(p8,"t5-11b"),p8.forEach(n),j0=i(Y,`\u2019. 
See the up-to-date list of available models on `),na=r(Y,"A",{href:!0,rel:!0});var m8=s(na);S0=i(m8,"huggingface.co/models"),m8.forEach(n),M0=i(Y,"."),Y.forEach(n),F0=d(Pe),u(ho.$$.fragment,Pe),L0=d(Pe),uo=r(Pe,"DIV",{class:!0});var Z_=s(uo);u(oa.$$.fragment,Z_),O0=d(Z_),Om=r(Z_,"P",{});var f8=s(Om);U0=i(f8,"Summarize the text(s) given as inputs."),f8.forEach(n),Z_.forEach(n),Pe.forEach(n),cg=d(t),Nt=r(t,"H3",{class:!0});var B_=s(Nt);go=r(B_,"A",{id:!0,class:!0,href:!0});var h8=s(go);Um=r(h8,"SPAN",{});var u8=s(Um);u(ra.$$.fragment,u8),u8.forEach(n),h8.forEach(n),N0=d(B_),Nm=r(B_,"SPAN",{});var g8=s(Nm);G0=i(g8,"TableQuestionAnsweringPipeline"),g8.forEach(n),B_.forEach(n),pg=d(t),se=r(t,"DIV",{class:!0});var Je=s(se);u(sa.$$.fragment,Je),Q0=d(Je),aa=r(Je,"P",{});var Y_=s(aa);R0=i(Y_,"Table Question Answering pipeline using a "),Gm=r(Y_,"CODE",{});var _8=s(Gm);V0=i(_8,"ModelForTableQuestionAnswering"),_8.forEach(n),H0=i(Y_,`. This pipeline is only available in PyTorch.`),Y_.forEach(n),W0=d(Je),Gt=r(Je,"P",{});var vd=s(Gt);Z0=i(vd,"This tabular question answering pipeline can currently be loaded from "),Ml=r(vd,"A",{href:!0});var b8=s(Ml);B0=i(b8,"pipeline()"),b8.forEach(n),Y0=i(vd,` using the following task identifier: `),Qm=r(vd,"CODE",{});var v8=s(Qm);X0=i(v8,'"table-question-answering"'),v8.forEach(n),K0=i(vd,"."),vd.forEach(n),J0=d(Je),ia=r(Je,"P",{});var X_=s(ia);e4=i(X_,`The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on `),la=r(X_,"A",{href:!0,rel:!0});var w8=s(la);t4=i(w8,"huggingface.co/models"),w8.forEach(n),n4=i(X_,"."),X_.forEach(n),o4=d(Je),U=r(Je,"DIV",{class:!0});var fe=s(U);u(da.$$.fragment,fe),r4=d(fe),Rm=r(fe,"P",{});var T8=s(Rm);s4=i(T8,"Answers queries according to a table. 
The pipeline accepts several types of inputs which are detailed below:"),T8.forEach(n),a4=d(fe),V=r(fe,"UL",{});var he=s(V);Vm=r(he,"LI",{});var k8=s(Vm);Hm=r(k8,"CODE",{});var P8=s(Hm);i4=i(P8,"pipeline(table, query)"),P8.forEach(n),k8.forEach(n),l4=d(he),Wm=r(he,"LI",{});var y8=s(Wm);Zm=r(y8,"CODE",{});var x8=s(Zm);d4=i(x8,"pipeline(table, [query])"),x8.forEach(n),y8.forEach(n),c4=d(he),Bm=r(he,"LI",{});var $8=s(Bm);Ym=r($8,"CODE",{});var E8=s(Ym);p4=i(E8,"pipeline(table=table, query=query)"),E8.forEach(n),$8.forEach(n),m4=d(he),Xm=r(he,"LI",{});var q8=s(Xm);Km=r(q8,"CODE",{});var A8=s(Km);f4=i(A8,"pipeline(table=table, query=[query])"),A8.forEach(n),q8.forEach(n),h4=d(he),Jm=r(he,"LI",{});var C8=s(Jm);ef=r(C8,"CODE",{});var D8=s(ef);u4=i(D8,'pipeline({"table": table, "query": query})'),D8.forEach(n),C8.forEach(n),g4=d(he),tf=r(he,"LI",{});var z8=s(tf);nf=r(z8,"CODE",{});var I8=s(nf);_4=i(I8,'pipeline({"table": table, "query": [query]})'),I8.forEach(n),z8.forEach(n),b4=d(he),of=r(he,"LI",{});var j8=s(of);rf=r(j8,"CODE",{});var S8=s(rf);v4=i(S8,'pipeline([{"table": table, "query": query}, {"table": table, "query": query}])'),S8.forEach(n),j8.forEach(n),he.forEach(n),w4=d(fe),ca=r(fe,"P",{});var K_=s(ca);T4=i(K_,"The "),sf=r(K_,"CODE",{});var M8=s(sf);k4=i(M8,"table"),M8.forEach(n),P4=i(K_," argument should be a dict or a DataFrame built from that dict, containing the whole table:"),K_.forEach(n),y4=d(fe),u(_o.$$.fragment,fe),x4=d(fe),af=r(fe,"P",{});var F8=s(af);$4=i(F8,"This dictionary can be passed in as such, or can be converted to a pandas DataFrame:"),F8.forEach(n),E4=d(fe),u(bo.$$.fragment,fe),fe.forEach(n),Je.forEach(n),mg=d(t),Qt=r(t,"H3",{class:!0});var J_=s(Qt);vo=r(J_,"A",{id:!0,class:!0,href:!0});var L8=s(vo);lf=r(L8,"SPAN",{});var O8=s(lf);u(pa.$$.fragment,O8),O8.forEach(n),L8.forEach(n),q4=d(J_),df=r(J_,"SPAN",{});var U8=s(df);A4=i(U8,"TextClassificationPipeline"),U8.forEach(n),J_.forEach(n),fg=d(t),H=r(t,"DIV",{class:!0});var ye=s(H);u(ma.$$.fragment,ye),C4=d(ye),Rt=r(ye,"P",{});var wd=s(Rt);D4=i(wd,"Text classification pipeline using any "),cf=r(wd,"CODE",{});var N8=s(cf);z4=i(N8,"ModelForSequenceClassification"),N8.forEach(n),I4=i(wd,". See the "),Fl=r(wd,"A",{href:!0});var G8=s(Fl);j4=i(G8,`sequence classification examples`),G8.forEach(n),S4=i(wd," for more information."),wd.forEach(n),M4=d(ye),Vt=r(ye,"P",{});var Td=s(Vt);F4=i(Td,"This text classification pipeline can currently be loaded from "),Ll=r(Td,"A",{href:!0});var Q8=s(Ll);L4=i(Q8,"pipeline()"),Q8.forEach(n),O4=i(Td,` using the following task identifier: `),pf=r(Td,"CODE",{});var R8=s(pf);U4=i(R8,'"sentiment-analysis"'),R8.forEach(n),N4=i(Td," (for classifying sequences according to positive or negative sentiments)."),Td.forEach(n),G4=d(ye),fa=r(ye,"P",{});var eb=s(fa);Q4=i(eb,"If multiple classification labels are available ("),mf=r(eb,"CODE",{});var V8=s(mf);R4=i(V8,"model.config.num_labels >= 2"),V8.forEach(n),V4=i(eb,`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.`),eb.forEach(n),H4=d(ye),ha=r(ye,"P",{});var tb=s(ha);W4=i(tb,`The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. 
See the up-to-date list of available models on `),ua=r(tb,"A",{href:!0,rel:!0});var H8=s(ua);Z4=i(H8,"huggingface.co/models"),H8.forEach(n),B4=i(tb,"."),tb.forEach(n),Y4=d(ye),wo=r(ye,"DIV",{class:!0});var nb=s(wo);u(ga.$$.fragment,nb),X4=d(nb),ff=r(nb,"P",{});var W8=s(ff);K4=i(W8,"Classify the text(s) given as inputs."),W8.forEach(n),nb.forEach(n),ye.forEach(n),hg=d(t),Ht=r(t,"H3",{class:!0});var ob=s(Ht);To=r(ob,"A",{id:!0,class:!0,href:!0});var Z8=s(To);hf=r(Z8,"SPAN",{});var B8=s(hf);u(_a.$$.fragment,B8),B8.forEach(n),Z8.forEach(n),J4=d(ob),uf=r(ob,"SPAN",{});var Y8=s(uf);ex=i(Y8,"TextGenerationPipeline"),Y8.forEach(n),ob.forEach(n),ug=d(t),ae=r(t,"DIV",{class:!0});var et=s(ae);u(ba.$$.fragment,et),tx=d(et),va=r(et,"P",{});var rb=s(va);nx=i(rb,"Language generation pipeline using any "),gf=r(rb,"CODE",{});var X8=s(gf);ox=i(X8,"ModelWithLMHead"),X8.forEach(n),rx=i(rb,`. This pipeline predicts the words that will follow a specified text prompt.`),rb.forEach(n),sx=d(et),Wt=r(et,"P",{});var kd=s(Wt);ax=i(kd,"This language generation pipeline can currently be loaded from "),Ol=r(kd,"A",{href:!0});var K8=s(Ol);ix=i(K8,"pipeline()"),K8.forEach(n),lx=i(kd,` using the following task identifier: `),_f=r(kd,"CODE",{});var J8=s(_f);dx=i(J8,'"text-generation"'),J8.forEach(n),cx=i(kd,"."),kd.forEach(n),px=d(et),wa=r(et,"P",{});var sb=s(wa);mx=i(sb,`The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models on `),Ta=r(sb,"A",{href:!0,rel:!0});var eC=s(Ta);fx=i(eC,"huggingface.co/models"),eC.forEach(n),hx=i(sb,"."),sb.forEach(n),ux=d(et),ko=r(et,"DIV",{class:!0});var ab=s(ko);u(ka.$$.fragment,ab),gx=d(ab),bf=r(ab,"P",{});var tC=s(bf);_x=i(tC,"Complete the prompt(s) given as inputs."),tC.forEach(n),ab.forEach(n),et.forEach(n),gg=d(t),Zt=r(t,"H3",{class:!0});var ib=s(Zt);Po=r(ib,"A",{id:!0,class:!0,href:!0});var nC=s(Po);vf=r(nC,"SPAN",{});var oC=s(vf);u(Pa.$$.fragment,oC),oC.forEach(n),nC.forEach(n),bx=d(ib),wf=r(ib,"SPAN",{});var rC=s(wf);vx=i(rC,"Text2TextGenerationPipeline"),rC.forEach(n),ib.forEach(n),_g=d(t),O=r(t,"DIV",{class:!0});var ue=s(O);u(ya.$$.fragment,ue),wx=d(ue),Tf=r(ue,"P",{});var sC=s(Tf);Tx=i(sC,"Pipeline for text to text generation using seq2seq models."),sC.forEach(n),kx=d(ue),Bt=r(ue,"P",{});var Pd=s(Bt);Px=i(Pd,"This Text2TextGenerationPipeline pipeline can currently be loaded from "),Ul=r(Pd,"A",{href:!0});var aC=s(Ul);yx=i(aC,"pipeline()"),aC.forEach(n),xx=i(Pd,` using the following task identifier: `),kf=r(Pd,"CODE",{});var iC=s(kf);$x=i(iC,'"text2text-generation"'),iC.forEach(n),Ex=i(Pd,"."),Pd.forEach(n),qx=d(ue),xa=r(ue,"P",{});var lb=s(xa);Ax=i(lb,`The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on `),$a=r(lb,"A",{href:!0,rel:!0});var lC=s($a);Cx=i(lC,"huggingface.co/models"),lC.forEach(n),Dx=i(lb,"."),lb.forEach(n),zx=d(ue),u(yo.$$.fragment,ue),Ix=d(ue),xo=r(ue,"DIV",{class:!0});var db=s(xo);u(Ea.$$.fragment,db),jx=d(db),Pf=r(db,"P",{});var dC=s(Pf);Sx=i(dC,"Generate the output text(s) using text(s) given as inputs."),dC.forEach(n),db.forEach(n),Mx=d(ue),$o=r(ue,"DIV",{class:!0});var cb=s($o);u(qa.$$.fragment,cb),Fx=d(cb),yf=r(cb,"P",{});var cC=s(yf);Lx=i(cC,"Checks whether there might be something wrong with given input with regard to the model."),cC.forEach(n),cb.forEach(n),ue.forEach(n),bg=d(t),Yt=r(t,"H3",{class:!0});var pb=s(Yt);Eo=r(pb,"A",{id:!0,class:!0,href:!0});var pC=s(Eo);xf=r(pC,"SPAN",{});var mC=s(xf);u(Aa.$$.fragment,mC),mC.forEach(n),pC.forEach(n),Ox=d(pb),$f=r(pb,"SPAN",{});var fC=s($f);Ux=i(fC,"TokenClassificationPipeline"),fC.forEach(n),pb.forEach(n),vg=d(t),I=r(t,"DIV",{class:!0});var N=s(I);u(Ca.$$.fragment,N),Nx=d(N),Xt=r(N,"P",{});var yd=s(Xt);Gx=i(yd,"Named Entity Recognition pipeline using any "),Ef=r(yd,"CODE",{});var hC=s(Ef);Qx=i(hC,"ModelForTokenClassification"),hC.forEach(n),Rx=i(yd,". See the "),Nl=r(yd,"A",{href:!0});var uC=s(Nl);Vx=i(uC,`named entity recognition examples`),uC.forEach(n),Hx=i(yd," for more information."),yd.forEach(n),Wx=d(N),Kt=r(N,"P",{});var xd=s(Kt);Zx=i(xd,"This token recognition pipeline can currently be loaded from "),Gl=r(xd,"A",{href:!0});var gC=s(Gl);Bx=i(gC,"pipeline()"),gC.forEach(n),Yx=i(xd,` using the following task identifier: `),qf=r(xd,"CODE",{});var _C=s(qf);Xx=i(_C,'"ner"'),_C.forEach(n),Kx=i(xd," (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),xd.forEach(n),Jx=d(N),Da=r(N,"P",{});var mb=s(Da);e$=i(mb,`The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
See the up-to-date list of available models on `),za=r(mb,"A",{href:!0,rel:!0});var bC=s(za);t$=i(bC,"huggingface.co/models"),bC.forEach(n),n$=i(mb,"."),mb.forEach(n),o$=d(N),qo=r(N,"DIV",{class:!0});var fb=s(qo);u(Ia.$$.fragment,fb),r$=d(fb),Af=r(fb,"P",{});var vC=s(Af);s$=i(vC,"Classify each token of the text(s) given as inputs."),vC.forEach(n),fb.forEach(n),a$=d(N),Oe=r(N,"DIV",{class:!0});var $d=s(Oe);u(ja.$$.fragment,$d),i$=d($d),Cf=r($d,"P",{});var wC=s(Cf);l$=i(wC,"Override tokens from a given word that disagree to force agreement on word boundaries."),wC.forEach(n),d$=d($d),Df=r($d,"P",{});var TC=s(Df);c$=i(TC,`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),TC.forEach(n),$d.forEach(n),p$=d(N),Ao=r(N,"DIV",{class:!0});var hb=s(Ao);u(Sa.$$.fragment,hb),m$=d(hb),zf=r(hb,"P",{});var kC=s(zf);f$=i(kC,"Fuse various numpy arrays into dicts with all the information needed for aggregation"),kC.forEach(n),hb.forEach(n),h$=d(N),Co=r(N,"DIV",{class:!0});var ub=s(Co);u(Ma.$$.fragment,ub),u$=d(ub),If=r(ub,"P",{});var PC=s(If);g$=i(PC,"Find and group together the adjacent tokens with the same entity predicted."),PC.forEach(n),ub.forEach(n),_$=d(N),Do=r(N,"DIV",{class:!0});var gb=s(Do);u(Fa.$$.fragment,gb),b$=d(gb),jf=r(gb,"P",{});var yC=s(jf);v$=i(yC,"Group together the adjacent tokens with the same entity predicted."),yC.forEach(n),gb.forEach(n),N.forEach(n),wg=d(t),Jt=r(t,"H3",{class:!0});var _b=s(Jt);zo=r(_b,"A",{id:!0,class:!0,href:!0});var xC=s(zo);Sf=r(xC,"SPAN",{});var $C=s(Sf);u(La.$$.fragment,$C),$C.forEach(n),xC.forEach(n),w$=d(_b),Mf=r(_b,"SPAN",{});var EC=s(Mf);T$=i(EC,"TranslationPipeline"),EC.forEach(n),_b.forEach(n),Tg=d(t),W=r(t,"DIV",{class:!0});var xe=s(W);u(Oa.$$.fragment,xe),k$=d(xe),Ff=r(xe,"P",{});var qC=s(Ff);P$=i(qC,"Translates from one language to another."),qC.forEach(n),y$=d(xe),en=r(xe,"P",{});var Ed=s(en);x$=i(Ed,"This translation pipeline can currently be loaded from "),Ql=r(Ed,"A",{href:!0});var AC=s(Ql);$$=i(AC,"pipeline()"),AC.forEach(n),E$=i(Ed,` using the following task identifier: `),Lf=r(Ed,"CODE",{});var CC=s(Lf);q$=i(CC,'"translation_xx_to_yy"'),CC.forEach(n),A$=i(Ed,"."),Ed.forEach(n),C$=d(xe),Ua=r(xe,"P",{});var bb=s(Ua);D$=i(bb,`The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on `),Na=r(bb,"A",{href:!0,rel:!0});var DC=s(Na);z$=i(DC,"huggingface.co/models"),DC.forEach(n),I$=i(bb,"."),bb.forEach(n),j$=d(xe),u(Io.$$.fragment,xe),S$=d(xe),jo=r(xe,"DIV",{class:!0});var vb=s(jo);u(Ga.$$.fragment,vb),M$=d(vb),Of=r(vb,"P",{});var zC=s(Of);F$=i(zC,"Translate the text(s) given as inputs."),zC.forEach(n),vb.forEach(n),xe.forEach(n),kg=d(t),tn=r(t,"H3",{class:!0});var wb=s(tn);So=r(wb,"A",{id:!0,class:!0,href:!0});var IC=s(So);Uf=r(IC,"SPAN",{});var jC=s(Uf);u(Qa.$$.fragment,jC),jC.forEach(n),IC.forEach(n),L$=d(wb),Nf=r(wb,"SPAN",{});var SC=s(Nf);O$=i(SC,"VisualQuestionAnsweringPipeline"),SC.forEach(n),wb.forEach(n),Pg=d(t),ie=r(t,"DIV",{class:!0});var tt=s(ie);u(Ra.$$.fragment,tt),U$=d(tt),Va=r(tt,"P",{});var Tb=s(Va);N$=i(Tb,"Visual Question Answering pipeline using a "),Gf=r(Tb,"CODE",{});var MC=s(Gf);G$=i(MC,"AutoModelForVisualQuestionAnswering"),MC.forEach(n),Q$=i(Tb,`. 
This pipeline is currently only available in PyTorch.`),Tb.forEach(n),R$=d(tt),nn=r(tt,"P",{});var qd=s(nn);V$=i(qd,"This visual question answering pipeline can currently be loaded from "),Rl=r(qd,"A",{href:!0});var FC=s(Rl);H$=i(FC,"pipeline()"),FC.forEach(n),W$=i(qd,` using the following task identifiers: `),Qf=r(qd,"CODE",{});var LC=s(Qf);Z$=i(LC,'"visual-question-answering", "vqa"'),LC.forEach(n),B$=i(qd,"."),qd.forEach(n),Y$=d(tt),Ha=r(tt,"P",{});var kb=s(Ha);X$=i(kb,`The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See the up-to-date list of available models on `),Wa=r(kb,"A",{href:!0,rel:!0});var OC=s(Wa);K$=i(OC,"huggingface.co/models"),OC.forEach(n),J$=i(kb,"."),kb.forEach(n),e9=d(tt),Ue=r(tt,"DIV",{class:!0});var Ad=s(Ue);u(Za.$$.fragment,Ad),t9=d(Ad),Rf=r(Ad,"P",{});var UC=s(Rf);n9=i(UC,`Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below:`),UC.forEach(n),o9=d(Ad),ze=r(Ad,"UL",{});var nr=s(ze);Vf=r(nr,"LI",{});var NC=s(Vf);Hf=r(NC,"CODE",{});var GC=s(Hf);r9=i(GC,"pipeline(image=image, question=question)"),GC.forEach(n),NC.forEach(n),s9=d(nr),Wf=r(nr,"LI",{});var QC=s(Wf);Zf=r(QC,"CODE",{});var RC=s(Zf);a9=i(RC,'pipeline({"image": image, "question": question})'),RC.forEach(n),QC.forEach(n),i9=d(nr),Bf=r(nr,"LI",{});var VC=s(Bf);Yf=r(VC,"CODE",{});var HC=s(Yf);l9=i(HC,'pipeline([{"image": image, "question": question}])'),HC.forEach(n),VC.forEach(n),d9=d(nr),Xf=r(nr,"LI",{});var WC=s(Xf);Kf=r(WC,"CODE",{});var ZC=s(Kf);c9=i(ZC,'pipeline([{"image": image, "question": question}, {"image": image, "question": question}])'),ZC.forEach(n),WC.forEach(n),nr.forEach(n),Ad.forEach(n),tt.forEach(n),yg=d(t),on=r(t,"H3",{class:!0});var Pb=s(on);Mo=r(Pb,"A",{id:!0,class:!0,href:!0});var BC=s(Mo);Jf=r(BC,"SPAN",{});var YC=s(Jf);u(Ba.$$.fragment,YC),YC.forEach(n),BC.forEach(n),p9=d(Pb),eh=r(Pb,"SPAN",{});var XC=s(eh);m9=i(XC,"ZeroShotClassificationPipeline"),XC.forEach(n),Pb.forEach(n),xg=d(t),Z=r(t,"DIV",{class:!0});var $e=s(Z);u(Ya.$$.fragment,$e),f9=d($e),Xa=r($e,"P",{});var yb=s(Xa);h9=i(yb,"NLI-based zero-shot classification pipeline using a "),th=r(yb,"CODE",{});var KC=s(th);u9=i(KC,"ModelForSequenceClassification"),KC.forEach(n),g9=i(yb,` trained on NLI (natural language inference) tasks.`),yb.forEach(n),_9=d($e),Ie=r($e,"P",{});var or=s(Ie);b9=i(or,`Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for `),nh=r(or,"EM",{});var JC=s(nh);v9=i(JC,"entailment"),JC.forEach(n),w9=i(or,` is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the `),oh=r(or,"EM",{});var eD=s(oh);T9=i(eD,"entailment"),eD.forEach(n),k9=i(or,` label must be included in the model config\u2019s :attr:`),rh=r(or,"EM",{});var tD=s(rh);P9=i(tD,"~transformers.PretrainedConfig.label2id"),tD.forEach(n),y9=i(or,"."),or.forEach(n),x9=d($e),rn=r($e,"P",{});var Cd=s(rn);$9=i(Cd,"This NLI pipeline can currently be loaded from "),Vl=r(Cd,"A",{href:!0});var nD=s(Vl);E9=i(nD,"pipeline()"),nD.forEach(n),q9=i(Cd,` using the following task identifier: `),sh=r(Cd,"CODE",{});var oD=s(sh);A9=i(oD,'"zero-shot-classification"'),oD.forEach(n),C9=i(Cd,"."),Cd.forEach(n),D9=d($e),Ka=r($e,"P",{});var xb=s(Ka);z9=i(xb,`The models that this pipeline can use are models that have been fine-tuned on an NLI task. 
See the up-to-date list of available models on `),Ja=r(xb,"A",{href:!0,rel:!0});var rD=s(Ja);I9=i(rD,"huggingface.co/models"),rD.forEach(n),j9=i(xb,"."),xb.forEach(n),S9=d($e),Fo=r($e,"DIV",{class:!0});var $b=s(Fo);u(ei.$$.fragment,$b),M9=d($b),ti=r($b,"P",{});var Eb=s(ti);F9=i(Eb,"Classify the sequence(s) given as inputs. See the "),Hl=r(Eb,"A",{href:!0});var sD=s(Hl);L9=i(sD,"ZeroShotClassificationPipeline"),sD.forEach(n),O9=i(Eb,` documentation for more information.`),Eb.forEach(n),$b.forEach(n),$e.forEach(n),$g=d(t),sn=r(t,"H3",{class:!0});var qb=s(sn);Lo=r(qb,"A",{id:!0,class:!0,href:!0});var aD=s(Lo);ah=r(aD,"SPAN",{});var iD=s(ah);u(ni.$$.fragment,iD),iD.forEach(n),aD.forEach(n),U9=d(qb),ih=r(qb,"SPAN",{});var lD=s(ih);N9=i(lD,"ZeroShotImageClassificationPipeline"),lD.forEach(n),qb.forEach(n),Eg=d(t),le=r(t,"DIV",{class:!0});var nt=s(le);u(oi.$$.fragment,nt),G9=d(nt),an=r(nt,"P",{});var Dd=s(an);Q9=i(Dd,"Zero shot image classification pipeline using "),lh=r(Dd,"CODE",{});var dD=s(lh);R9=i(dD,"CLIPModel"),dD.forEach(n),V9=i(Dd,`. This pipeline predicts the class of an image when you provide an image and a set of `),dh=r(Dd,"CODE",{});var cD=s(dh);H9=i(cD,"candidate_labels"),cD.forEach(n),W9=i(Dd,"."),Dd.forEach(n),Z9=d(nt),ln=r(nt,"P",{});var zd=s(ln);B9=i(zd,"This image classification pipeline can currently be loaded from "),Wl=r(zd,"A",{href:!0});var pD=s(Wl);Y9=i(pD,"pipeline()"),pD.forEach(n),X9=i(zd,` using the following task identifier: `),ch=r(zd,"CODE",{});var mD=s(ch);K9=i(mD,'"zero-shot-image-classification"'),mD.forEach(n),J9=i(zd,"."),zd.forEach(n),eE=d(nt),ri=r(nt,"P",{});var Ab=s(ri);tE=i(Ab,`See the list of available models on `),si=r(Ab,"A",{href:!0,rel:!0});var fD=s(si);nE=i(fD,"huggingface.co/models"),fD.forEach(n),oE=i(Ab,"."),Ab.forEach(n),rE=d(nt),Oo=r(nt,"DIV",{class:!0});var Cb=s(Oo);u(ai.$$.fragment,Cb),sE=d(Cb),ph=r(Cb,"P",{});var hD=s(ph);aE=i(hD,"Assign labels to the image(s) passed as inputs."),hD.forEach(n),Cb.forEach(n),nt.forEach(n),qg=d(t),dn=r(t,"H3",{class:!0});var Db=s(dn);Uo=r(Db,"A",{id:!0,class:!0,href:!0});var uD=s(Uo);mh=r(uD,"SPAN",{});var gD=s(mh);u(ii.$$.fragment,gD),gD.forEach(n),uD.forEach(n),iE=d(Db),fh=r(Db,"SPAN",{});var _D=s(fh);lE=i(_D,"ZeroShotObjectDetectionPipeline"),_D.forEach(n),Db.forEach(n),Ag=d(t),de=r(t,"DIV",{class:!0});var ot=s(de);u(li.$$.fragment,ot),dE=d(ot),cn=r(ot,"P",{});var Id=s(cn);cE=i(Id,"Zero shot object detection pipeline using "),hh=r(Id,"CODE",{});var bD=s(hh);pE=i(bD,"OwlViTForObjectDetection"),bD.forEach(n),mE=i(Id,`. 
This pipeline predicts bounding boxes of objects when you provide an image and a set of `),uh=r(Id,"CODE",{});var vD=s(uh);fE=i(vD,"candidate_labels"),vD.forEach(n),hE=i(Id,"."),Id.forEach(n),uE=d(ot),pn=r(ot,"P",{});var jd=s(pn);gE=i(jd,"This object detection pipeline can currently be loaded from "),Zl=r(jd,"A",{href:!0});var wD=s(Zl);_E=i(wD,"pipeline()"),wD.forEach(n),bE=i(jd,` using the following task identifier: `),gh=r(jd,"CODE",{});var TD=s(gh);vE=i(TD,'"zero-shot-object-detection"'),TD.forEach(n),wE=i(jd,"."),jd.forEach(n),TE=d(ot),di=r(ot,"P",{});var zb=s(di);kE=i(zb,`See the list of available models on `),ci=r(zb,"A",{href:!0,rel:!0});var kD=s(ci);PE=i(kD,"huggingface.co/models"),kD.forEach(n),yE=i(zb,"."),zb.forEach(n),xE=d(ot),No=r(ot,"DIV",{class:!0});var Ib=s(No);u(pi.$$.fragment,Ib),$E=d(Ib),_h=r(Ib,"P",{});var PD=s(_h);EE=i(PD,"Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),PD.forEach(n),Ib.forEach(n),ot.forEach(n),Cg=d(t),mn=r(t,"H2",{class:!0});var jb=s(mn);Go=r(jb,"A",{id:!0,class:!0,href:!0});var yD=s(Go);bh=r(yD,"SPAN",{});var xD=s(bh);u(mi.$$.fragment,xD),xD.forEach(n),yD.forEach(n),qE=d(jb),Bl=r(jb,"SPAN",{});var C5=s(Bl);AE=i(C5,"Parent class: "),vh=r(C5,"CODE",{});var $D=s(vh);CE=i($D,"Pipeline"),$D.forEach(n),C5.forEach(n),jb.forEach(n),Dg=d(t),A=r(t,"DIV",{class:!0});var D=s(A);u(fi.$$.fragment,D),DE=d(D),wh=r(D,"P",{});var ED=s(wh);zE=i(ED,`The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.`),ED.forEach(n),IE=d(D),Th=r(D,"P",{});var qD=s(Th);jE=i(qD,`Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations:`),qD.forEach(n),SE=d(D),kh=r(D,"P",{});var AD=s(kh);ME=i(AD,"Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output"),AD.forEach(n),FE=d(D),Ph=r(D,"P",{});var CD=s(Ph);LE=i(CD,"Pipeline supports running on CPU or GPU through the device argument (see below)."),CD.forEach(n),OE=d(D),ve=r(D,"P",{});var rt=s(ve);UE=i(rt,"Some pipeline, like for instance "),Yl=r(rt,"A",{href:!0});var DD=s(Yl);NE=i(DD,"FeatureExtractionPipeline"),DD.forEach(n),GE=i(rt," ("),yh=r(rt,"CODE",{});var zD=s(yh);QE=i(zD,"'feature-extraction'"),zD.forEach(n),RE=i(rt,`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `),xh=r(rt,"CODE",{});var ID=s(xh);VE=i(ID,"binary_output"),ID.forEach(n),HE=i(rt,` constructor argument. 
If set to `),$h=r(rt,"CODE",{});var jD=s($h);WE=i(jD,"True"),jD.forEach(n),ZE=i(rt,", the output will be stored in the pickle format."),rt.forEach(n),BE=d(D),Qo=r(D,"DIV",{class:!0});var Sb=s(Qo);u(hi.$$.fragment,Sb),YE=d(Sb),Eh=r(Sb,"P",{});var SD=s(Eh);XE=i(SD,"Check if the model class is in supported by the pipeline."),SD.forEach(n),Sb.forEach(n),KE=d(D),Ne=r(D,"DIV",{class:!0});var Sd=s(Ne);u(ui.$$.fragment,Sd),JE=d(Sd),qh=r(Sd,"P",{});var MD=s(qh);e5=i(MD,"Context Manager allowing tensor allocation on the user-specified device in framework agnostic way."),MD.forEach(n),t5=d(Sd),u(Ro.$$.fragment,Sd),Sd.forEach(n),n5=d(D),Vo=r(D,"DIV",{class:!0});var Mb=s(Vo);u(gi.$$.fragment,Mb),o5=d(Mb),Ah=r(Mb,"P",{});var FD=s(Ah);r5=i(FD,"Ensure PyTorch tensors are on the specified device."),FD.forEach(n),Mb.forEach(n),s5=d(D),Ho=r(D,"DIV",{class:!0});var Fb=s(Ho);u(_i.$$.fragment,Fb),a5=d(Fb),bi=r(Fb,"P",{});var Lb=s(bi);i5=i(Lb,"Postprocess will receive the raw outputs of the "),Ch=r(Lb,"CODE",{});var LD=s(Ch);l5=i(LD,"_forward"),LD.forEach(n),d5=i(Lb,` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers).`),Lb.forEach(n),Fb.forEach(n),c5=d(D),Wo=r(D,"DIV",{class:!0});var Ob=s(Wo);u(vi.$$.fragment,Ob),p5=d(Ob),wi=r(Ob,"P",{});var Ub=s(wi);m5=i(Ub,"Scikit / Keras interface to transformers\u2019 pipelines. This method will forward to "),Dh=r(Ub,"STRONG",{});var OD=s(Dh);f5=i(OD,"call"),OD.forEach(n),h5=i(Ub,"()."),Ub.forEach(n),Ob.forEach(n),u5=d(D),Zo=r(D,"DIV",{class:!0});var Nb=s(Zo);u(Ti.$$.fragment,Nb),g5=d(Nb),fn=r(Nb,"P",{});var Md=s(fn);_5=i(Md,"Preprocess will take the "),zh=r(Md,"CODE",{});var UD=s(zh);b5=i(UD,"input_"),UD.forEach(n),v5=i(Md,` of a specific pipeline and return a dictionnary of everything necessary for `),Ih=r(Md,"CODE",{});var ND=s(Ih);w5=i(ND,"_forward"),ND.forEach(n),T5=i(Md," to run properly. It should contain at least one tensor, but might have arbitrary other items."),Md.forEach(n),Nb.forEach(n),k5=d(D),Bo=r(D,"DIV",{class:!0});var Gb=s(Bo);u(ki.$$.fragment,Gb),P5=d(Gb),jh=r(Gb,"P",{});var GD=s(jh);y5=i(GD,"Save the pipeline\u2019s model and tokenizer."),GD.forEach(n),Gb.forEach(n),x5=d(D),Yo=r(D,"DIV",{class:!0});var Qb=s(Yo);u(Pi.$$.fragment,Qb),$5=d(Qb),yi=r(Qb,"P",{});var Rb=s(yi);E5=i(Rb,"Scikit / Keras interface to transformers\u2019 pipelines. 
This method will forward to "),Sh=r(Rb,"STRONG",{});var QD=s(Sh);q5=i(QD,"call"),QD.forEach(n),A5=i(Rb,"()."),Rb.forEach(n),Qb.forEach(n),D.forEach(n),this.h()},h(){c(T,"name","hf:doc:metadata"),c(T,"content",JSON.stringify(l7)),c(w,"id","pipelines"),c(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(w,"href","#pipelines"),c(y,"class","relative group"),c(Ci,"href","../task_summary"),c(zi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Ii,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AudioClassificationPipeline"),c(ji,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(Si,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(Mi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline"),c(Fi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline"),c(Li,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FillMaskPipeline"),c(Oi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageClassificationPipeline"),c(Ui,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageSegmentationPipeline"),c(Ni,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageToTextPipeline"),c(Gi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ObjectDetectionPipeline"),c(Qi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline"),c(Ri,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.SummarizationPipeline"),c(Vi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline"),c(Hi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextClassificationPipeline"),c(Wi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextGenerationPipeline"),c(Zi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline"),c(Bi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline"),c(Yi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline"),c(Xi,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.VisualQuestionAnsweringPipeline"),c(Ki,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline"),c(Ji,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotImageClassificationPipeline"),c(el,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotObjectDetectionPipeline"),c(bn,"id","transformers.pipeline"),c(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bn,"href","#transformers.pipeline"),c(st,"class","relative group"),c(lr,"href","https://huggingface.co"),c(lr,"rel","nofollow"),c(ol,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline"),c(rl,"href","tokenizer"),c(sl,"href","model"),c(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"),c(yn,"id","pipeline-batching"),c(yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(yn,"href","#pipeline-batching"),c(it,"class","relative group"),c(En,"id","pipeline-chunk-batching"),c(En,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(En,"href","#pipeline-chunk-batching"),c(dt,"class","relative group"),c(An,"id","pipeline-custom-code"),c(An,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(An,"href","#pipeline-custom-code"),c(ct,"class","relative group"),c(Dn,"id","implementing-a-pipeline"),c(Dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Dn,"href","#implementing-a-pipeline"),c(pt,"class","relative group"),c(gl,"href","../add_new_pipeline"),c(zn,"id","the-task-specific-pipelines"),c(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zn,"href","#the-task-specific-pipelines"),c(mt,"class","relative group"),c(In,"id","transformers.AudioClassificationPipeline"),c(In,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(In,"href","#transformers.AudioClassificationPipeline"),c(ft,"class","relative group"),c(_l,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Sr,"href","https://huggingface.co/models?filter=audio-classification"),c(Sr,"rel","nofollow"),c(bl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(jn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Sn,"id","transformers.AutomaticSpeechRecognitionPipeline"),c(Sn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Sn,"href","#transformers.AutomaticSpeechRecognitionPipeline"),c(ut,"class","relative group"),c(vl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(Mn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Fn,"id","transformers.Conversation"),c(Fn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Fn,"href","#transformers.Conversation"),c(gt,"class","relative group"),c(wl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(Tl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(On,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Un,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Nn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(kl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Yr,"href","https://huggingface.co/models?filter=conversational"),c(Yr,"rel","nofollow"),c(Qn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Rn,"id","transformers.DocumentQuestionAnsweringPipeline"),c(Rn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rn,"href","#transformers.DocumentQuestionAnsweringPipeline"),c(bt,"class","relative group"),c(Pl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(ns,"href","https://huggingface.co/models?filter=document-question-answering"),c(ns,"rel","nofollow"),c(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Vn,"id","transformers.FeatureExtractionPipeline"),c(Vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Vn,"href","#transformers.FeatureExtractionPipeline"),c(wt,"class","relative group"),c(yl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(ls,"href","https://huggingface.co/models"),c(ls,"rel","nofollow"),c(Hn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Wn,"id","transformers.FillMaskPipeline"),c(Wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wn,"href","#transformers.FillMaskPipeline"),c(kt,"class","relative group"),c(xl,"href","../task_summary#masked-language-modeling"),c($l,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(fs,"href","https://huggingface.co/models?filter=fill-mask"),c(fs,"rel","nofollow"),c(Bn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Yn,"id","transformers.ImageClassificationPipeline"),c(Yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Yn,"href","#transformers.ImageClassificationPipeline"),c(xt,"class","relative group"),c(El,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(vs,"href","https://huggingface.co/models?filter=image-classification"),c(vs,"rel","nofollow"),c(Xn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl 
mb-6 mt-8"),c(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Kn,"id","transformers.ImageSegmentationPipeline"),c(Kn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Kn,"href","#transformers.ImageSegmentationPipeline"),c(Et,"class","relative group"),c(ql,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(xs,"href","https://huggingface.co/models?filter=image-segmentation"),c(xs,"rel","nofollow"),c(Jn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(eo,"id","transformers.ImageToTextPipeline"),c(eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(eo,"href","#transformers.ImageToTextPipeline"),c(At,"class","relative group"),c(Ds,"href","https://huggingface.co/models?pipeline_tag=image-to-text"),c(Ds,"rel","nofollow"),c(to,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(no,"id","transformers.TokenClassificationPipeline"),c(no,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(no,"href","#transformers.TokenClassificationPipeline"),c(Ct,"class","relative group"),c(Al,"href","../task_summary#named-entity-recognition"),c(Cl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Ms,"href","https://huggingface.co/models?filter=token-classification"),c(Ms,"rel","nofollow"),c(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(oo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ro,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(so,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Dl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline"),c(io,"id","transformers.ObjectDetectionPipeline"),c(io,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(io,"href","#transformers.ObjectDetectionPipeline"),c(It,"class","relative group"),c(zl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Vs,"href","https://huggingface.co/models?filter=object-detection"),c(Vs,"rel","nofollow"),c(lo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(co,"id","transformers.QuestionAnsweringPipeline"),c(co,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(co,"href","#transformers.QuestionAnsweringPipeline"),c(St,"class","relative group"),c(Il,"href","../task_summary#question-answering"),c(jl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Ys,"href","https://huggingface.co/models?filter=question-answering"),c(Ys,"rel","nofollow"),c(po,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(mo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(fo,"id","transformers.SummarizationPipeline"),c(fo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(fo,"href","#transformers.SummarizationPipeline"),c(Ot,"class","relative group"),c(Sl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(na,"href","https://huggingface.co/models?filter=summarization"),c(na,"rel","nofollow"),c(uo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(go,"id","transformers.TableQuestionAnsweringPipeline"),c(go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(go,"href","#transformers.TableQuestionAnsweringPipeline"),c(Nt,"class","relative group"),c(Ml,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(la,"href","https://huggingface.co/models?filter=table-question-answering"),c(la,"rel","nofollow"),c(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(vo,"id","transformers.TextClassificationPipeline"),c(vo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vo,"href","#transformers.TextClassificationPipeline"),c(Qt,"class","relative group"),c(Fl,"href","../task_summary#sequence-classification"),c(Ll,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(ua,"href","https://huggingface.co/models?filter=text-classification"),c(ua,"rel","nofollow"),c(wo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(To,"id","transformers.TextGenerationPipeline"),c(To,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(To,"href","#transformers.TextGenerationPipeline"),c(Ht,"class","relative group"),c(Ol,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Ta,"href","https://huggingface.co/models?filter=text-generation"),c(Ta,"rel","nofollow"),c(ko,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ae,"class","docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Po,"id","transformers.Text2TextGenerationPipeline"),c(Po,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Po,"href","#transformers.Text2TextGenerationPipeline"),c(Zt,"class","relative group"),c(Ul,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c($a,"href","https://huggingface.co/models?filter=text2text-generation"),c($a,"rel","nofollow"),c(xo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c($o,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Eo,"id","transformers.TokenClassificationPipeline"),c(Eo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Eo,"href","#transformers.TokenClassificationPipeline"),c(Yt,"class","relative group"),c(Nl,"href","../task_summary#named-entity-recognition"),c(Gl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(za,"href","https://huggingface.co/models?filter=token-classification"),c(za,"rel","nofollow"),c(qo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ao,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Co,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Do,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(zo,"id","transformers.TranslationPipeline"),c(zo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(zo,"href","#transformers.TranslationPipeline"),c(Jt,"class","relative group"),c(Ql,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Na,"href","https://huggingface.co/models?filter=translation"),c(Na,"rel","nofollow"),c(jo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(So,"id","transformers.VisualQuestionAnsweringPipeline"),c(So,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(So,"href","#transformers.VisualQuestionAnsweringPipeline"),c(tn,"class","relative group"),c(Rl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Wa,"href","https://huggingface.co/models?filter=visual-question-answering"),c(Wa,"rel","nofollow"),c(Ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Mo,"id","transformers.ZeroShotClassificationPipeline"),c(Mo,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Mo,"href","#transformers.ZeroShotClassificationPipeline"),c(on,"class","relative group"),c(Vl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(Ja,"href","https://huggingface.co/models?search=nli"),c(Ja,"rel","nofollow"),c(Hl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline"),c(Fo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Lo,"id","transformers.ZeroShotImageClassificationPipeline"),c(Lo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Lo,"href","#transformers.ZeroShotImageClassificationPipeline"),c(sn,"class","relative group"),c(Wl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(si,"href","https://huggingface.co/models?filter=zero-shot-image-classification"),c(si,"rel","nofollow"),c(Oo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Uo,"id","transformers.ZeroShotObjectDetectionPipeline"),c(Uo,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Uo,"href","#transformers.ZeroShotObjectDetectionPipeline"),c(dn,"class","relative group"),c(Zl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline"),c(ci,"href","https://huggingface.co/models?filter=zero-shot-object-detection"),c(ci,"rel","nofollow"),c(No,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Go,"id","transformers.Pipeline"),c(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Go,"href","#transformers.Pipeline"),c(mn,"class","relative group"),c(Yl,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline"),c(Qo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Vo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ho,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Wo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Zo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Bo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Yo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(t,p){e(document.head,T),m(t,$,p),m(t,y,p),e(y,w),e(w,x),g(f,x,null),e(y,k),e(y,Ee),e(Ee,Vb),m(t,Qh,p),m(t,gn,p),e(gn,Hb),e(gn,Ci),e(Ci,Wb),e(gn,Zb),m(t,Rh,p),m(t,Di,p),e(Di,Bb),m(t,Vh,p),m(t,_n,p),e(_n,Fd),e(Fd,rr),e(rr,Yb),e(rr,zi),e(zi,Xb),e(rr,Kb),e(_n,Jb),e(_n,sr),e(sr,Ld),e(Ld,ev),e(sr,tv),e(sr,E),e(E,Od),e(Od,Ii),e(Ii,nv),e(E,ov),e(E,Ud),e(Ud,ji),e(ji,rv),e(E,sv),e(E,Nd),e(Nd,Si),e(Si,av),e(E,iv),e(E,Gd),e(Gd,Mi),e(Mi,lv),e(E,dv),e(E,Qd),e(Qd,Fi),e(Fi,cv),e(E,pv),e(E,Rd),e(Rd,Li),e(Li,mv),e(E,fv),e(E,Vd),e(Vd,Oi),e(Oi,hv),e(E,uv),e(E,Hd),e(Hd,Ui),e(Ui,gv),e(E,_v),e(E,Wd),e(Wd,Ni),e(Ni,bv),e(E,vv),e(E,Zd),e(Zd,Gi),e(Gi,wv),e(E,Tv),e(E,Bd),e(Bd,Qi),e(Qi,kv),e(E,Pv),e(E,Yd),e(Yd,Ri),e(Ri,yv),e(E,xv),e(E,Xd),e(Xd,Vi),e(Vi,$v),e(E,Ev),e(E,Kd),e(Kd,Hi),e(Hi,qv),e(E,Av),e(E,Jd),e(Jd,Wi),e(Wi,Cv),e(E,Dv),e(E,ec),e(ec,Zi),e(Zi,zv),e(E,Iv),e(E,tc),e(tc,Bi),e(Bi,jv),e(E,Sv),e(E,nc),e(nc,Yi),e(Yi,Mv),e(E,Fv),e(E,oc),e(oc,Xi),e(Xi,Lv),e(E,Ov),e(E,rc),e(rc,Ki),e(Ki,Uv),e(E,Nv),e(E,sc),e(sc,Ji),e(Ji,Gv),e(E,Qv),e(E,ac),e(ac,el),e(el,Rv),m(t,Hh,p),m(t,st,p),e(st,bn),e(bn,ic),g(ar,ic,null),e(st,Vv),e(st,lc),e(lc,Hv),m(t,Wh,p),m(t,vn,p),e(vn,Wv),e(vn,dc),e(dc,Zv),e(vn,Bv),m(t,Zh,p),m(t,tl,p),e(tl,Yv),m(t,Bh,p),g(ir,t,p),m(t,Yh,p),m(t,wn,p),e(wn,Xv),e(wn,lr),e(lr,Kv),e(wn,Jv),m(t,Xh,p),g(dr,t,p),m(t,Kh,p),m(t,Tn,p),e(Tn,ew),e(Tn,cc),e(cc,tw),e(Tn,nw),m(t,Jh,p),g(cr,t,p),m(t,eu,p),m(t,kn,p),e(kn,ow),e(kn,pc),e(pc,rw),e(kn,sw),m(t,tu,p),g(pr,t,p),m(t,nu,p),m(t,nl,p),e(nl,aw),m(t,ou,p),g(mr,t,p),m(t,ru,p),m(t,X,p),g(fr,X,null),e(X,iw),e(X,hr),e(hr,lw),e(hr,ol),e(ol,dw),e(hr,cw),e(X,pw),e(X,mc),e(mc,mw),e(X,fw),e(X,at),e(at,ur),e(ur,hw),e(ur,rl),e(rl,uw),e(ur,gw),e(at,_w),e(at,gr),e(gr,bw),e(gr,sl),e(sl,vw),e(gr,ww),e(at,Tw),e(at,fc),e(fc,kw),e(X,Pw),g(Pn,X,null),m(t,su,p),m(t,it,p),e(it,yn),e(yn,hc),g(_r,hc,null),e(it,yw),e(it,uc),e(uc,xw),m(t,au,p),m(t,je,p),e(je,$w),e(je,gc),e(gc,Ew),e(je,qw),e(je,_c),e(_c,Aw),e(je,Cw),m(t,iu,p),g(br,t,p),m(t,lu,p),g(xn,t,p),m(t,du,p),g(vr,t,p),m(t,cu,p),g(wr,t,p),m(t,pu,p),m(t,al,p),e(al,Dw),m(t,mu,p),g(Tr,t,p),m(t,fu,p),m(t,$n,p),e($n,zw),e($n,bc),e(bc,Iw),e($n,jw),m(t,hu,p),g(kr,t,p),m(t,uu,p),m(t,il,p),e(il,Sw),m(t,gu,p),m(t,ll,p),e(ll,Mw),m(t,_u,p),m(t,ce,p),e(ce,vc),e(vc,wc),e(wc,Tc),e(Tc,Fw),e(ce,Lw),e(ce,kc),e(kc,Pc),e(Pc,Ow),e(ce,Uw),e(ce,yc),e(yc,xc),e(xc,Nw),e(ce,Gw),e(ce,Pr),e(Pr,$c),e($c,Qw),e(Pr,Rw),e(Pr,lt),e(lt,Ec),e(Ec,Vw),e(lt,Hw),e(lt,qc),e(qc,Ww),e(lt,Zw),e(lt,Ac),e(Ac,Bw),e(ce,Yw),e(ce,Cc),e(Cc,Dc),e(Dc,Xw),m(t,bu,p),m(t,dt,p),e(dt,En),e(En,zc),g(yr,zc,null),e(dt,Kw),e(dt,Ic),e(Ic,Jw),m(t,vu,p),m(t,qe,p),e(qe,jc),e(jc,e1),e(qe,t1),e(qe,Sc),e(Sc,n1),e(qe,o1),e(qe,Mc),e(Mc,r1),e(qe,s1),m(t,wu,p),m(t,Se,p),e(Se,a1),e(Se,Fc),e(Fc,i1),e(Se,l1),e(Se,Lc),e(Lc,d1),e(Se,c1),m(t,Tu,p),g(xr,t,p),m(t,ku,p),m(t,dl,p),e(dl,p1),m(t,Pu,p),g($r,t,p),m(t,yu,p),m(t,cl,p),e(cl,m1),m(t,xu,p),m(t,qn,p),e(qn,f1),e(qn,Oc),e(Oc,h1),e(qn,u1),m(t,$u,p),m(t,ct,p),e(ct,An),e(An,Uc),g(Er,Uc,null),e(ct,g1),e(ct,Nc),e(Nc,_1),m(t,Eu,p),m(t,pl,p),e(pl,b1),m(t,qu,p),m(t,Cn,p),e(Cn,v1),e(Cn,Gc),e(Gc,w1),e(Cn,T1),m(t,Au,p),m(t,ml,p),e(ml,k1),m(t,Cu,p),m(t,fl,p),e(fl,Qc),e(Qc,P1),m(t,Du,p),g(qr,t,p),m(t,zu,p),m(t,hl,p),e(hl,y1),m(t,Iu,p),m(t,pt,p),e(pt,Dn),e(Dn,Rc),g(Ar,Rc,null),e(pt,x1),e(pt,Vc),e(Vc,$1),m(t,ju,p),m(t,ul,p),e(ul,gl),e(gl,E1),m(t,Su,p),m(t,mt,p),e(mt,zn),e(zn,Hc),g(Cr,Hc,null),e(mt,q1),e(mt,Wc),e(Wc,A1),m(t,Mu,p),m(t,ft,p),e(ft,In),e(In,Zc),g(Dr,Zc,null),e(ft,C1),e(ft,Bc),e(Bc,D1),m(t,Fu,p),m(t,K,p),g(zr,K,null),e(K,z1),e(K,Ir),e(Ir,I1),e(Ir,Yc),e(Yc,j1),e(Ir,S
1),e(K,M1),e(K,ht),e(ht,F1),e(ht,_l),e(_l,L1),e(ht,O1),e(ht,Xc),e(Xc,U1),e(ht,N1),e(K,G1),e(K,jr),e(jr,Q1),e(jr,Sr),e(Sr,R1),e(jr,V1),e(K,H1),e(K,jn),g(Mr,jn,null),e(jn,W1),e(jn,Fr),e(Fr,Z1),e(Fr,bl),e(bl,B1),e(Fr,Y1),m(t,Lu,p),m(t,ut,p),e(ut,Sn),e(Sn,Kc),g(Lr,Kc,null),e(ut,X1),e(ut,Jc),e(Jc,K1),m(t,Ou,p),m(t,ge,p),g(Or,ge,null),e(ge,J1),e(ge,ep),e(ep,e2),e(ge,t2),e(ge,tp),e(tp,n2),e(ge,o2),e(ge,Mn),g(Ur,Mn,null),e(Mn,r2),e(Mn,Nr),e(Nr,s2),e(Nr,vl),e(vl,a2),e(Nr,i2),m(t,Uu,p),m(t,gt,p),e(gt,Fn),e(Fn,np),g(Gr,np,null),e(gt,l2),e(gt,op),e(op,d2),m(t,Nu,p),m(t,M,p),g(Qr,M,null),e(M,c2),e(M,Ae),e(Ae,p2),e(Ae,wl),e(wl,m2),e(Ae,f2),e(Ae,Tl),e(Tl,h2),e(Ae,u2),e(Ae,rp),e(rp,g2),e(Ae,_2),e(M,b2),g(Ln,M,null),e(M,v2),e(M,On),g(Rr,On,null),e(On,w2),e(On,Vr),e(Vr,T2),e(Vr,sp),e(sp,k2),e(Vr,P2),e(M,y2),e(M,Un),g(Hr,Un,null),e(Un,x2),e(Un,ap),e(ap,$2),e(M,E2),e(M,Me),g(Wr,Me,null),e(Me,q2),e(Me,ip),e(ip,A2),e(Me,C2),e(Me,_e),e(_e,D2),e(_e,lp),e(lp,z2),e(_e,I2),e(_e,dp),e(dp,j2),e(_e,S2),e(_e,cp),e(cp,M2),e(_e,F2),e(_e,pp),e(pp,L2),e(_e,O2),e(M,U2),e(M,Nn),g(Zr,Nn,null),e(Nn,N2),e(Nn,Ce),e(Ce,G2),e(Ce,mp),e(mp,Q2),e(Ce,R2),e(Ce,fp),e(fp,V2),e(Ce,H2),e(Ce,hp),e(hp,W2),e(Ce,Z2),m(t,Gu,p),m(t,G,p),g(Br,G,null),e(G,B2),e(G,up),e(up,Y2),e(G,X2),e(G,_t),e(_t,K2),e(_t,kl),e(kl,J2),e(_t,eT),e(_t,gp),e(gp,tT),e(_t,nT),e(G,oT),e(G,be),e(be,rT),e(be,_p),e(_p,sT),e(be,aT),e(be,bp),e(bp,iT),e(be,lT),e(be,vp),e(vp,dT),e(be,cT),e(be,Yr),e(Yr,pT),e(be,mT),e(G,fT),g(Gn,G,null),e(G,hT),e(G,Qn),g(Xr,Qn,null),e(Qn,uT),e(Qn,wp),e(wp,gT),m(t,Qu,p),m(t,bt,p),e(bt,Rn),e(Rn,Tp),g(Kr,Tp,null),e(bt,_T),e(bt,kp),e(kp,bT),m(t,Ru,p),m(t,J,p),g(Jr,J,null),e(J,vT),e(J,es),e(es,wT),e(es,Pp),e(Pp,TT),e(es,kT),e(J,PT),e(J,vt),e(vt,yT),e(vt,Pl),e(Pl,xT),e(vt,$T),e(vt,yp),e(yp,ET),e(vt,qT),e(J,AT),e(J,ts),e(ts,CT),e(ts,ns),e(ns,DT),e(ts,zT),e(J,IT),e(J,we),g(os,we,null),e(we,jT),e(we,rs),e(rs,ST),e(rs,xp),e(xp,MT),e(rs,FT),e(we,LT),e(we,$p),e($p,OT),e(we,UT),e(we,De),e(De,Ep),e(Ep,qp),e(qp,NT),e(De,GT),e(De,Ap),e(Ap,Cp),e(Cp,QT),e(De,RT),e(De,Dp),e(Dp,zp),e(zp,VT),e(De,HT),e(De,Ip),e(Ip,jp),e(jp,WT),m(t,Vu,p),m(t,wt,p),e(wt,Vn),e(Vn,Sp),g(ss,Sp,null),e(wt,ZT),e(wt,Mp),e(Mp,BT),m(t,Hu,p),m(t,ee,p),g(as,ee,null),e(ee,YT),e(ee,Fp),e(Fp,XT),e(ee,KT),e(ee,Tt),e(Tt,JT),e(Tt,yl),e(yl,ek),e(Tt,tk),e(Tt,Lp),e(Lp,nk),e(Tt,ok),e(ee,rk),e(ee,is),e(is,sk),e(is,ls),e(ls,ak),e(is,ik),e(ee,lk),e(ee,Hn),g(ds,Hn,null),e(Hn,dk),e(Hn,Op),e(Op,ck),m(t,Wu,p),m(t,kt,p),e(kt,Wn),e(Wn,Up),g(cs,Up,null),e(kt,pk),e(kt,Np),e(Np,mk),m(t,Zu,p),m(t,Q,p),g(ps,Q,null),e(Q,fk),e(Q,Pt),e(Pt,hk),e(Pt,Gp),e(Gp,uk),e(Pt,gk),e(Pt,xl),e(xl,_k),e(Pt,bk),e(Q,vk),e(Q,yt),e(yt,wk),e(yt,$l),e($l,Tk),e(yt,kk),e(yt,Qp),e(Qp,Pk),e(yt,yk),e(Q,xk),e(Q,ms),e(ms,$k),e(ms,fs),e(fs,Ek),e(ms,qk),e(Q,Ak),g(Zn,Q,null),e(Q,Ck),e(Q,Bn),g(hs,Bn,null),e(Bn,Dk),e(Bn,Rp),e(Rp,zk),m(t,Bu,p),m(t,xt,p),e(xt,Yn),e(Yn,Vp),g(us,Vp,null),e(xt,Ik),e(xt,Hp),e(Hp,jk),m(t,Yu,p),m(t,te,p),g(gs,te,null),e(te,Sk),e(te,_s),e(_s,Mk),e(_s,Wp),e(Wp,Fk),e(_s,Lk),e(te,Ok),e(te,$t),e($t,Uk),e($t,El),e(El,Nk),e($t,Gk),e($t,Zp),e(Zp,Qk),e($t,Rk),e(te,Vk),e(te,bs),e(bs,Hk),e(bs,vs),e(vs,Wk),e(bs,Zk),e(te,Bk),e(te,Xn),g(ws,Xn,null),e(Xn,Yk),e(Xn,Bp),e(Bp,Xk),m(t,Xu,p),m(t,Et,p),e(Et,Kn),e(Kn,Yp),g(Ts,Yp,null),e(Et,Kk),e(Et,Xp),e(Xp,Jk),m(t,Ku,p),m(t,ne,p),g(ks,ne,null),e(ne,eP),e(ne,Ps),e(Ps,tP),e(Ps,Kp),e(Kp,nP),e(Ps,oP),e(ne,rP),e(ne,qt),e(qt,sP),e(qt,ql),e(ql,aP),e(qt,iP),e(qt,Jp),e(Jp,lP),e(qt,dP),e(ne,cP),e(ne,ys),e(ys,pP),e(ys,xs),e(xs,mP),e(ys,fP),e(ne,hP),e(ne,Jn),g($s,Jn,null),e(Jn,uP),e(Jn,em),e(em,gP),m(t,Ju,p),m(t,A
t,p),e(At,eo),e(eo,tm),g(Es,tm,null),e(At,_P),e(At,nm),e(nm,bP),m(t,eg,p),m(t,oe,p),g(qs,oe,null),e(oe,vP),e(oe,As),e(As,wP),e(As,om),e(om,TP),e(As,kP),e(oe,PP),e(oe,rm),e(rm,yP),e(oe,xP),e(oe,Cs),e(Cs,$P),e(Cs,Ds),e(Ds,EP),e(Cs,qP),e(oe,AP),e(oe,to),g(zs,to,null),e(to,CP),e(to,sm),e(sm,DP),m(t,tg,p),m(t,Ct,p),e(Ct,no),e(no,am),g(Is,am,null),e(Ct,zP),e(Ct,im),e(im,IP),m(t,ng,p),m(t,S,p),g(js,S,null),e(S,jP),e(S,Dt),e(Dt,SP),e(Dt,lm),e(lm,MP),e(Dt,FP),e(Dt,Al),e(Al,LP),e(Dt,OP),e(S,UP),e(S,zt),e(zt,NP),e(zt,Cl),e(Cl,GP),e(zt,QP),e(zt,dm),e(dm,RP),e(zt,VP),e(S,HP),e(S,Ss),e(Ss,WP),e(Ss,Ms),e(Ms,ZP),e(Ss,BP),e(S,YP),e(S,Fe),g(Fs,Fe,null),e(Fe,XP),e(Fe,cm),e(cm,KP),e(Fe,JP),e(Fe,pm),e(pm,ey),e(S,ty),e(S,oo),g(Ls,oo,null),e(oo,ny),e(oo,mm),e(mm,oy),e(S,ry),e(S,ro),g(Os,ro,null),e(ro,sy),e(ro,fm),e(fm,ay),e(S,iy),e(S,so),g(Us,so,null),e(so,ly),e(so,hm),e(hm,dy),m(t,og,p),m(t,ao,p),e(ao,cy),e(ao,Dl),e(Dl,py),e(ao,my),m(t,rg,p),m(t,It,p),e(It,io),e(io,um),g(Ns,um,null),e(It,fy),e(It,gm),e(gm,hy),m(t,sg,p),m(t,re,p),g(Gs,re,null),e(re,uy),e(re,Qs),e(Qs,gy),e(Qs,_m),e(_m,_y),e(Qs,by),e(re,vy),e(re,jt),e(jt,wy),e(jt,zl),e(zl,Ty),e(jt,ky),e(jt,bm),e(bm,Py),e(jt,yy),e(re,xy),e(re,Rs),e(Rs,$y),e(Rs,Vs),e(Vs,Ey),e(Rs,qy),e(re,Ay),e(re,lo),g(Hs,lo,null),e(lo,Cy),e(lo,vm),e(vm,Dy),m(t,ag,p),m(t,St,p),e(St,co),e(co,wm),g(Ws,wm,null),e(St,zy),e(St,Tm),e(Tm,Iy),m(t,ig,p),m(t,F,p),g(Zs,F,null),e(F,jy),e(F,Mt),e(Mt,Sy),e(Mt,km),e(km,My),e(Mt,Fy),e(Mt,Il),e(Il,Ly),e(Mt,Oy),e(F,Uy),e(F,Ft),e(Ft,Ny),e(Ft,jl),e(jl,Gy),e(Ft,Qy),e(Ft,Pm),e(Pm,Ry),e(Ft,Vy),e(F,Hy),e(F,Bs),e(Bs,Wy),e(Bs,Ys),e(Ys,Zy),e(Bs,By),e(F,Yy),e(F,po),g(Xs,po,null),e(po,Xy),e(po,ym),e(ym,Ky),e(F,Jy),e(F,Le),g(Ks,Le,null),e(Le,e0),e(Le,Lt),e(Lt,t0),e(Lt,xm),e(xm,n0),e(Lt,o0),e(Lt,$m),e($m,r0),e(Lt,s0),e(Le,a0),e(Le,Em),e(Em,i0),e(F,l0),e(F,mo),g(Js,mo,null),e(mo,d0),e(mo,qm),e(qm,c0),m(t,lg,p),m(t,Ot,p),e(Ot,fo),e(fo,Am),g(ea,Am,null),e(Ot,p0),e(Ot,Cm),e(Cm,m0),m(t,dg,p),m(t,R,p),g(ta,R,null),e(R,f0),e(R,Dm),e(Dm,h0),e(R,u0),e(R,Ut),e(Ut,g0),e(Ut,Sl),e(Sl,_0),e(Ut,b0),e(Ut,zm),e(zm,v0),e(Ut,w0),e(R,T0),e(R,L),e(L,k0),e(L,Im),e(Im,P0),e(L,y0),e(L,jm),e(jm,x0),e(L,$0),e(L,Sm),e(Sm,E0),e(L,q0),e(L,Mm),e(Mm,A0),e(L,C0),e(L,Fm),e(Fm,D0),e(L,z0),e(L,Lm),e(Lm,I0),e(L,j0),e(L,na),e(na,S0),e(L,M0),e(R,F0),g(ho,R,null),e(R,L0),e(R,uo),g(oa,uo,null),e(uo,O0),e(uo,Om),e(Om,U0),m(t,cg,p),m(t,Nt,p),e(Nt,go),e(go,Um),g(ra,Um,null),e(Nt,N0),e(Nt,Nm),e(Nm,G0),m(t,pg,p),m(t,se,p),g(sa,se,null),e(se,Q0),e(se,aa),e(aa,R0),e(aa,Gm),e(Gm,V0),e(aa,H0),e(se,W0),e(se,Gt),e(Gt,Z0),e(Gt,Ml),e(Ml,B0),e(Gt,Y0),e(Gt,Qm),e(Qm,X0),e(Gt,K0),e(se,J0),e(se,ia),e(ia,e4),e(ia,la),e(la,t4),e(ia,n4),e(se,o4),e(se,U),g(da,U,null),e(U,r4),e(U,Rm),e(Rm,s4),e(U,a4),e(U,V),e(V,Vm),e(Vm,Hm),e(Hm,i4),e(V,l4),e(V,Wm),e(Wm,Zm),e(Zm,d4),e(V,c4),e(V,Bm),e(Bm,Ym),e(Ym,p4),e(V,m4),e(V,Xm),e(Xm,Km),e(Km,f4),e(V,h4),e(V,Jm),e(Jm,ef),e(ef,u4),e(V,g4),e(V,tf),e(tf,nf),e(nf,_4),e(V,b4),e(V,of),e(of,rf),e(rf,v4),e(U,w4),e(U,ca),e(ca,T4),e(ca,sf),e(sf,k4),e(ca,P4),e(U,y4),g(_o,U,null),e(U,x4),e(U,af),e(af,$4),e(U,E4),g(bo,U,null),m(t,mg,p),m(t,Qt,p),e(Qt,vo),e(vo,lf),g(pa,lf,null),e(Qt,q4),e(Qt,df),e(df,A4),m(t,fg,p),m(t,H,p),g(ma,H,null),e(H,C4),e(H,Rt),e(Rt,D4),e(Rt,cf),e(cf,z4),e(Rt,I4),e(Rt,Fl),e(Fl,j4),e(Rt,S4),e(H,M4),e(H,Vt),e(Vt,F4),e(Vt,Ll),e(Ll,L4),e(Vt,O4),e(Vt,pf),e(pf,U4),e(Vt,N4),e(H,G4),e(H,fa),e(fa,Q4),e(fa,mf),e(mf,R4),e(fa,V4),e(H,H4),e(H,ha),e(ha,W4),e(ha,ua),e(ua,Z4),e(ha,B4),e(H,Y4),e(H,wo),g(ga,wo,null),e(wo,X4),e(wo,ff),e(ff,K4),m(t,hg,p),m(t,Ht,p),e(Ht,To),e(To,hf),g(_a,hf,null),e(H
t,J4),e(Ht,uf),e(uf,ex),m(t,ug,p),m(t,ae,p),g(ba,ae,null),e(ae,tx),e(ae,va),e(va,nx),e(va,gf),e(gf,ox),e(va,rx),e(ae,sx),e(ae,Wt),e(Wt,ax),e(Wt,Ol),e(Ol,ix),e(Wt,lx),e(Wt,_f),e(_f,dx),e(Wt,cx),e(ae,px),e(ae,wa),e(wa,mx),e(wa,Ta),e(Ta,fx),e(wa,hx),e(ae,ux),e(ae,ko),g(ka,ko,null),e(ko,gx),e(ko,bf),e(bf,_x),m(t,gg,p),m(t,Zt,p),e(Zt,Po),e(Po,vf),g(Pa,vf,null),e(Zt,bx),e(Zt,wf),e(wf,vx),m(t,_g,p),m(t,O,p),g(ya,O,null),e(O,wx),e(O,Tf),e(Tf,Tx),e(O,kx),e(O,Bt),e(Bt,Px),e(Bt,Ul),e(Ul,yx),e(Bt,xx),e(Bt,kf),e(kf,$x),e(Bt,Ex),e(O,qx),e(O,xa),e(xa,Ax),e(xa,$a),e($a,Cx),e(xa,Dx),e(O,zx),g(yo,O,null),e(O,Ix),e(O,xo),g(Ea,xo,null),e(xo,jx),e(xo,Pf),e(Pf,Sx),e(O,Mx),e(O,$o),g(qa,$o,null),e($o,Fx),e($o,yf),e(yf,Lx),m(t,bg,p),m(t,Yt,p),e(Yt,Eo),e(Eo,xf),g(Aa,xf,null),e(Yt,Ox),e(Yt,$f),e($f,Ux),m(t,vg,p),m(t,I,p),g(Ca,I,null),e(I,Nx),e(I,Xt),e(Xt,Gx),e(Xt,Ef),e(Ef,Qx),e(Xt,Rx),e(Xt,Nl),e(Nl,Vx),e(Xt,Hx),e(I,Wx),e(I,Kt),e(Kt,Zx),e(Kt,Gl),e(Gl,Bx),e(Kt,Yx),e(Kt,qf),e(qf,Xx),e(Kt,Kx),e(I,Jx),e(I,Da),e(Da,e$),e(Da,za),e(za,t$),e(Da,n$),e(I,o$),e(I,qo),g(Ia,qo,null),e(qo,r$),e(qo,Af),e(Af,s$),e(I,a$),e(I,Oe),g(ja,Oe,null),e(Oe,i$),e(Oe,Cf),e(Cf,l$),e(Oe,d$),e(Oe,Df),e(Df,c$),e(I,p$),e(I,Ao),g(Sa,Ao,null),e(Ao,m$),e(Ao,zf),e(zf,f$),e(I,h$),e(I,Co),g(Ma,Co,null),e(Co,u$),e(Co,If),e(If,g$),e(I,_$),e(I,Do),g(Fa,Do,null),e(Do,b$),e(Do,jf),e(jf,v$),m(t,wg,p),m(t,Jt,p),e(Jt,zo),e(zo,Sf),g(La,Sf,null),e(Jt,w$),e(Jt,Mf),e(Mf,T$),m(t,Tg,p),m(t,W,p),g(Oa,W,null),e(W,k$),e(W,Ff),e(Ff,P$),e(W,y$),e(W,en),e(en,x$),e(en,Ql),e(Ql,$$),e(en,E$),e(en,Lf),e(Lf,q$),e(en,A$),e(W,C$),e(W,Ua),e(Ua,D$),e(Ua,Na),e(Na,z$),e(Ua,I$),e(W,j$),g(Io,W,null),e(W,S$),e(W,jo),g(Ga,jo,null),e(jo,M$),e(jo,Of),e(Of,F$),m(t,kg,p),m(t,tn,p),e(tn,So),e(So,Uf),g(Qa,Uf,null),e(tn,L$),e(tn,Nf),e(Nf,O$),m(t,Pg,p),m(t,ie,p),g(Ra,ie,null),e(ie,U$),e(ie,Va),e(Va,N$),e(Va,Gf),e(Gf,G$),e(Va,Q$),e(ie,R$),e(ie,nn),e(nn,V$),e(nn,Rl),e(Rl,H$),e(nn,W$),e(nn,Qf),e(Qf,Z$),e(nn,B$),e(ie,Y$),e(ie,Ha),e(Ha,X$),e(Ha,Wa),e(Wa,K$),e(Ha,J$),e(ie,e9),e(ie,Ue),g(Za,Ue,null),e(Ue,t9),e(Ue,Rf),e(Rf,n9),e(Ue,o9),e(Ue,ze),e(ze,Vf),e(Vf,Hf),e(Hf,r9),e(ze,s9),e(ze,Wf),e(Wf,Zf),e(Zf,a9),e(ze,i9),e(ze,Bf),e(Bf,Yf),e(Yf,l9),e(ze,d9),e(ze,Xf),e(Xf,Kf),e(Kf,c9),m(t,yg,p),m(t,on,p),e(on,Mo),e(Mo,Jf),g(Ba,Jf,null),e(on,p9),e(on,eh),e(eh,m9),m(t,xg,p),m(t,Z,p),g(Ya,Z,null),e(Z,f9),e(Z,Xa),e(Xa,h9),e(Xa,th),e(th,u9),e(Xa,g9),e(Z,_9),e(Z,Ie),e(Ie,b9),e(Ie,nh),e(nh,v9),e(Ie,w9),e(Ie,oh),e(oh,T9),e(Ie,k9),e(Ie,rh),e(rh,P9),e(Ie,y9),e(Z,x9),e(Z,rn),e(rn,$9),e(rn,Vl),e(Vl,E9),e(rn,q9),e(rn,sh),e(sh,A9),e(rn,C9),e(Z,D9),e(Z,Ka),e(Ka,z9),e(Ka,Ja),e(Ja,I9),e(Ka,j9),e(Z,S9),e(Z,Fo),g(ei,Fo,null),e(Fo,M9),e(Fo,ti),e(ti,F9),e(ti,Hl),e(Hl,L9),e(ti,O9),m(t,$g,p),m(t,sn,p),e(sn,Lo),e(Lo,ah),g(ni,ah,null),e(sn,U9),e(sn,ih),e(ih,N9),m(t,Eg,p),m(t,le,p),g(oi,le,null),e(le,G9),e(le,an),e(an,Q9),e(an,lh),e(lh,R9),e(an,V9),e(an,dh),e(dh,H9),e(an,W9),e(le,Z9),e(le,ln),e(ln,B9),e(ln,Wl),e(Wl,Y9),e(ln,X9),e(ln,ch),e(ch,K9),e(ln,J9),e(le,eE),e(le,ri),e(ri,tE),e(ri,si),e(si,nE),e(ri,oE),e(le,rE),e(le,Oo),g(ai,Oo,null),e(Oo,sE),e(Oo,ph),e(ph,aE),m(t,qg,p),m(t,dn,p),e(dn,Uo),e(Uo,mh),g(ii,mh,null),e(dn,iE),e(dn,fh),e(fh,lE),m(t,Ag,p),m(t,de,p),g(li,de,null),e(de,dE),e(de,cn),e(cn,cE),e(cn,hh),e(hh,pE),e(cn,mE),e(cn,uh),e(uh,fE),e(cn,hE),e(de,uE),e(de,pn),e(pn,gE),e(pn,Zl),e(Zl,_E),e(pn,bE),e(pn,gh),e(gh,vE),e(pn,wE),e(de,TE),e(de,di),e(di,kE),e(di,ci),e(ci,PE),e(di,yE),e(de,xE),e(de,No),g(pi,No,null),e(No,$E),e(No,_h),e(_h,EE),m(t,Cg,p),m(t,mn,p),e(mn,Go),e(Go,bh),g(mi,bh,null),e(mn,qE),e(mn,Bl),e(Bl,AE),e(Bl,vh),e(vh,CE
),m(t,Dg,p),m(t,A,p),g(fi,A,null),e(A,DE),e(A,wh),e(wh,zE),e(A,IE),e(A,Th),e(Th,jE),e(A,SE),e(A,kh),e(kh,ME),e(A,FE),e(A,Ph),e(Ph,LE),e(A,OE),e(A,ve),e(ve,UE),e(ve,Yl),e(Yl,NE),e(ve,GE),e(ve,yh),e(yh,QE),e(ve,RE),e(ve,xh),e(xh,VE),e(ve,HE),e(ve,$h),e($h,WE),e(ve,ZE),e(A,BE),e(A,Qo),g(hi,Qo,null),e(Qo,YE),e(Qo,Eh),e(Eh,XE),e(A,KE),e(A,Ne),g(ui,Ne,null),e(Ne,JE),e(Ne,qh),e(qh,e5),e(Ne,t5),g(Ro,Ne,null),e(A,n5),e(A,Vo),g(gi,Vo,null),e(Vo,o5),e(Vo,Ah),e(Ah,r5),e(A,s5),e(A,Ho),g(_i,Ho,null),e(Ho,a5),e(Ho,bi),e(bi,i5),e(bi,Ch),e(Ch,l5),e(bi,d5),e(A,c5),e(A,Wo),g(vi,Wo,null),e(Wo,p5),e(Wo,wi),e(wi,m5),e(wi,Dh),e(Dh,f5),e(wi,h5),e(A,u5),e(A,Zo),g(Ti,Zo,null),e(Zo,g5),e(Zo,fn),e(fn,_5),e(fn,zh),e(zh,b5),e(fn,v5),e(fn,Ih),e(Ih,w5),e(fn,T5),e(A,k5),e(A,Bo),g(ki,Bo,null),e(Bo,P5),e(Bo,jh),e(jh,y5),e(A,x5),e(A,Yo),g(Pi,Yo,null),e(Yo,$5),e(Yo,yi),e(yi,E5),e(yi,Sh),e(Sh,q5),e(yi,A5),zg=!0},p(t,[p]){const xi={};p&2&&(xi.$$scope={dirty:p,ctx:t}),Pn.$set(xi);const Mh={};p&2&&(Mh.$$scope={dirty:p,ctx:t}),xn.$set(Mh);const Fh={};p&2&&(Fh.$$scope={dirty:p,ctx:t}),Ln.$set(Fh);const Lh={};p&2&&(Lh.$$scope={dirty:p,ctx:t}),Gn.$set(Lh);const $i={};p&2&&($i.$$scope={dirty:p,ctx:t}),Zn.$set($i);const Oh={};p&2&&(Oh.$$scope={dirty:p,ctx:t}),ho.$set(Oh);const Uh={};p&2&&(Uh.$$scope={dirty:p,ctx:t}),_o.$set(Uh);const Ei={};p&2&&(Ei.$$scope={dirty:p,ctx:t}),bo.$set(Ei);const Nh={};p&2&&(Nh.$$scope={dirty:p,ctx:t}),yo.$set(Nh);const qi={};p&2&&(qi.$$scope={dirty:p,ctx:t}),Io.$set(qi);const Gh={};p&2&&(Gh.$$scope={dirty:p,ctx:t}),Ro.$set(Gh)},i(t){zg||(_(f.$$.fragment,t),_(ar.$$.fragment,t),_(ir.$$.fragment,t),_(dr.$$.fragment,t),_(cr.$$.fragment,t),_(pr.$$.fragment,t),_(mr.$$.fragment,t),_(fr.$$.fragment,t),_(Pn.$$.fragment,t),_(_r.$$.fragment,t),_(br.$$.fragment,t),_(xn.$$.fragment,t),_(vr.$$.fragment,t),_(wr.$$.fragment,t),_(Tr.$$.fragment,t),_(kr.$$.fragment,t),_(yr.$$.fragment,t),_(xr.$$.fragment,t),_($r.$$.fragment,t),_(Er.$$.fragment,t),_(qr.$$.fragment,t),_(Ar.$$.fragment,t),_(Cr.$$.fragment,t),_(Dr.$$.fragment,t),_(zr.$$.fragment,t),_(Mr.$$.fragment,t),_(Lr.$$.fragment,t),_(Or.$$.fragment,t),_(Ur.$$.fragment,t),_(Gr.$$.fragment,t),_(Qr.$$.fragment,t),_(Ln.$$.fragment,t),_(Rr.$$.fragment,t),_(Hr.$$.fragment,t),_(Wr.$$.fragment,t),_(Zr.$$.fragment,t),_(Br.$$.fragment,t),_(Gn.$$.fragment,t),_(Xr.$$.fragment,t),_(Kr.$$.fragment,t),_(Jr.$$.fragment,t),_(os.$$.fragment,t),_(ss.$$.fragment,t),_(as.$$.fragment,t),_(ds.$$.fragment,t),_(cs.$$.fragment,t),_(ps.$$.fragment,t),_(Zn.$$.fragment,t),_(hs.$$.fragment,t),_(us.$$.fragment,t),_(gs.$$.fragment,t),_(ws.$$.fragment,t),_(Ts.$$.fragment,t),_(ks.$$.fragment,t),_($s.$$.fragment,t),_(Es.$$.fragment,t),_(qs.$$.fragment,t),_(zs.$$.fragment,t),_(Is.$$.fragment,t),_(js.$$.fragment,t),_(Fs.$$.fragment,t),_(Ls.$$.fragment,t),_(Os.$$.fragment,t),_(Us.$$.fragment,t),_(Ns.$$.fragment,t),_(Gs.$$.fragment,t),_(Hs.$$.fragment,t),_(Ws.$$.fragment,t),_(Zs.$$.fragment,t),_(Xs.$$.fragment,t),_(Ks.$$.fragment,t),_(Js.$$.fragment,t),_(ea.$$.fragment,t),_(ta.$$.fragment,t),_(ho.$$.fragment,t),_(oa.$$.fragment,t),_(ra.$$.fragment,t),_(sa.$$.fragment,t),_(da.$$.fragment,t),_(_o.$$.fragment,t),_(bo.$$.fragment,t),_(pa.$$.fragment,t),_(ma.$$.fragment,t),_(ga.$$.fragment,t),_(_a.$$.fragment,t),_(ba.$$.fragment,t),_(ka.$$.fragment,t),_(Pa.$$.fragment,t),_(ya.$$.fragment,t),_(yo.$$.fragment,t),_(Ea.$$.fragment,t),_(qa.$$.fragment,t),_(Aa.$$.fragment,t),_(Ca.$$.fragment,t),_(Ia.$$.fragment,t),_(ja.$$.fragment,t),_(Sa.$$.fragment,t),_(Ma.$$.fragment,t),_(Fa.$$.fragment,t),_(La.$$.fragment,t),_(Oa.$$.fr
agment,t),_(Io.$$.fragment,t),_(Ga.$$.fragment,t),_(Qa.$$.fragment,t),_(Ra.$$.fragment,t),_(Za.$$.fragment,t),_(Ba.$$.fragment,t),_(Ya.$$.fragment,t),_(ei.$$.fragment,t),_(ni.$$.fragment,t),_(oi.$$.fragment,t),_(ai.$$.fragment,t),_(ii.$$.fragment,t),_(li.$$.fragment,t),_(pi.$$.fragment,t),_(mi.$$.fragment,t),_(fi.$$.fragment,t),_(hi.$$.fragment,t),_(ui.$$.fragment,t),_(Ro.$$.fragment,t),_(gi.$$.fragment,t),_(_i.$$.fragment,t),_(vi.$$.fragment,t),_(Ti.$$.fragment,t),_(ki.$$.fragment,t),_(Pi.$$.fragment,t),zg=!0)},o(t){b(f.$$.fragment,t),b(ar.$$.fragment,t),b(ir.$$.fragment,t),b(dr.$$.fragment,t),b(cr.$$.fragment,t),b(pr.$$.fragment,t),b(mr.$$.fragment,t),b(fr.$$.fragment,t),b(Pn.$$.fragment,t),b(_r.$$.fragment,t),b(br.$$.fragment,t),b(xn.$$.fragment,t),b(vr.$$.fragment,t),b(wr.$$.fragment,t),b(Tr.$$.fragment,t),b(kr.$$.fragment,t),b(yr.$$.fragment,t),b(xr.$$.fragment,t),b($r.$$.fragment,t),b(Er.$$.fragment,t),b(qr.$$.fragment,t),b(Ar.$$.fragment,t),b(Cr.$$.fragment,t),b(Dr.$$.fragment,t),b(zr.$$.fragment,t),b(Mr.$$.fragment,t),b(Lr.$$.fragment,t),b(Or.$$.fragment,t),b(Ur.$$.fragment,t),b(Gr.$$.fragment,t),b(Qr.$$.fragment,t),b(Ln.$$.fragment,t),b(Rr.$$.fragment,t),b(Hr.$$.fragment,t),b(Wr.$$.fragment,t),b(Zr.$$.fragment,t),b(Br.$$.fragment,t),b(Gn.$$.fragment,t),b(Xr.$$.fragment,t),b(Kr.$$.fragment,t),b(Jr.$$.fragment,t),b(os.$$.fragment,t),b(ss.$$.fragment,t),b(as.$$.fragment,t),b(ds.$$.fragment,t),b(cs.$$.fragment,t),b(ps.$$.fragment,t),b(Zn.$$.fragment,t),b(hs.$$.fragment,t),b(us.$$.fragment,t),b(gs.$$.fragment,t),b(ws.$$.fragment,t),b(Ts.$$.fragment,t),b(ks.$$.fragment,t),b($s.$$.fragment,t),b(Es.$$.fragment,t),b(qs.$$.fragment,t),b(zs.$$.fragment,t),b(Is.$$.fragment,t),b(js.$$.fragment,t),b(Fs.$$.fragment,t),b(Ls.$$.fragment,t),b(Os.$$.fragment,t),b(Us.$$.fragment,t),b(Ns.$$.fragment,t),b(Gs.$$.fragment,t),b(Hs.$$.fragment,t),b(Ws.$$.fragment,t),b(Zs.$$.fragment,t),b(Xs.$$.fragment,t),b(Ks.$$.fragment,t),b(Js.$$.fragment,t),b(ea.$$.fragment,t),b(ta.$$.fragment,t),b(ho.$$.fragment,t),b(oa.$$.fragment,t),b(ra.$$.fragment,t),b(sa.$$.fragment,t),b(da.$$.fragment,t),b(_o.$$.fragment,t),b(bo.$$.fragment,t),b(pa.$$.fragment,t),b(ma.$$.fragment,t),b(ga.$$.fragment,t),b(_a.$$.fragment,t),b(ba.$$.fragment,t),b(ka.$$.fragment,t),b(Pa.$$.fragment,t),b(ya.$$.fragment,t),b(yo.$$.fragment,t),b(Ea.$$.fragment,t),b(qa.$$.fragment,t),b(Aa.$$.fragment,t),b(Ca.$$.fragment,t),b(Ia.$$.fragment,t),b(ja.$$.fragment,t),b(Sa.$$.fragment,t),b(Ma.$$.fragment,t),b(Fa.$$.fragment,t),b(La.$$.fragment,t),b(Oa.$$.fragment,t),b(Io.$$.fragment,t),b(Ga.$$.fragment,t),b(Qa.$$.fragment,t),b(Ra.$$.fragment,t),b(Za.$$.fragment,t),b(Ba.$$.fragment,t),b(Ya.$$.fragment,t),b(ei.$$.fragment,t),b(ni.$$.fragment,t),b(oi.$$.fragment,t),b(ai.$$.fragment,t),b(ii.$$.fragment,t),b(li.$$.fragment,t),b(pi.$$.fragment,t),b(mi.$$.fragment,t),b(fi.$$.fragment,t),b(hi.$$.fragment,t),b(ui.$$.fragment,t),b(Ro.$$.fragment,t),b(gi.$$.fragment,t),b(_i.$$.fragment,t),b(vi.$$.fragment,t),b(Ti.$$.fragment,t),b(ki.$$.fragment,t),b(Pi.$$.fragment,t),zg=!1},d(t){n(T),t&&n($),t&&n(y),v(f),t&&n(Qh),t&&n(gn),t&&n(Rh),t&&n(Di),t&&n(Vh),t&&n(_n),t&&n(Hh),t&&n(st),v(ar),t&&n(Wh),t&&n(vn),t&&n(Zh),t&&n(tl),t&&n(Bh),v(ir,t),t&&n(Yh),t&&n(wn),t&&n(Xh),v(dr,t),t&&n(Kh),t&&n(Tn),t&&n(Jh),v(cr,t),t&&n(eu),t&&n(kn),t&&n(tu),v(pr,t),t&&n(nu),t&&n(nl),t&&n(ou),v(mr,t),t&&n(ru),t&&n(X),v(fr),v(Pn),t&&n(su),t&&n(it),v(_r),t&&n(au),t&&n(je),t&&n(iu),v(br,t),t&&n(lu),v(xn,t),t&&n(du),v(vr,t),t&&n(cu),v(wr,t),t&&n(pu),t&&n(al),t&&n(mu),v(Tr,t),t&&n(fu),t&&n($n),t&&n(hu),v(k
r,t),t&&n(uu),t&&n(il),t&&n(gu),t&&n(ll),t&&n(_u),t&&n(ce),t&&n(bu),t&&n(dt),v(yr),t&&n(vu),t&&n(qe),t&&n(wu),t&&n(Se),t&&n(Tu),v(xr,t),t&&n(ku),t&&n(dl),t&&n(Pu),v($r,t),t&&n(yu),t&&n(cl),t&&n(xu),t&&n(qn),t&&n($u),t&&n(ct),v(Er),t&&n(Eu),t&&n(pl),t&&n(qu),t&&n(Cn),t&&n(Au),t&&n(ml),t&&n(Cu),t&&n(fl),t&&n(Du),v(qr,t),t&&n(zu),t&&n(hl),t&&n(Iu),t&&n(pt),v(Ar),t&&n(ju),t&&n(ul),t&&n(Su),t&&n(mt),v(Cr),t&&n(Mu),t&&n(ft),v(Dr),t&&n(Fu),t&&n(K),v(zr),v(Mr),t&&n(Lu),t&&n(ut),v(Lr),t&&n(Ou),t&&n(ge),v(Or),v(Ur),t&&n(Uu),t&&n(gt),v(Gr),t&&n(Nu),t&&n(M),v(Qr),v(Ln),v(Rr),v(Hr),v(Wr),v(Zr),t&&n(Gu),t&&n(G),v(Br),v(Gn),v(Xr),t&&n(Qu),t&&n(bt),v(Kr),t&&n(Ru),t&&n(J),v(Jr),v(os),t&&n(Vu),t&&n(wt),v(ss),t&&n(Hu),t&&n(ee),v(as),v(ds),t&&n(Wu),t&&n(kt),v(cs),t&&n(Zu),t&&n(Q),v(ps),v(Zn),v(hs),t&&n(Bu),t&&n(xt),v(us),t&&n(Yu),t&&n(te),v(gs),v(ws),t&&n(Xu),t&&n(Et),v(Ts),t&&n(Ku),t&&n(ne),v(ks),v($s),t&&n(Ju),t&&n(At),v(Es),t&&n(eg),t&&n(oe),v(qs),v(zs),t&&n(tg),t&&n(Ct),v(Is),t&&n(ng),t&&n(S),v(js),v(Fs),v(Ls),v(Os),v(Us),t&&n(og),t&&n(ao),t&&n(rg),t&&n(It),v(Ns),t&&n(sg),t&&n(re),v(Gs),v(Hs),t&&n(ag),t&&n(St),v(Ws),t&&n(ig),t&&n(F),v(Zs),v(Xs),v(Ks),v(Js),t&&n(lg),t&&n(Ot),v(ea),t&&n(dg),t&&n(R),v(ta),v(ho),v(oa),t&&n(cg),t&&n(Nt),v(ra),t&&n(pg),t&&n(se),v(sa),v(da),v(_o),v(bo),t&&n(mg),t&&n(Qt),v(pa),t&&n(fg),t&&n(H),v(ma),v(ga),t&&n(hg),t&&n(Ht),v(_a),t&&n(ug),t&&n(ae),v(ba),v(ka),t&&n(gg),t&&n(Zt),v(Pa),t&&n(_g),t&&n(O),v(ya),v(yo),v(Ea),v(qa),t&&n(bg),t&&n(Yt),v(Aa),t&&n(vg),t&&n(I),v(Ca),v(Ia),v(ja),v(Sa),v(Ma),v(Fa),t&&n(wg),t&&n(Jt),v(La),t&&n(Tg),t&&n(W),v(Oa),v(Io),v(Ga),t&&n(kg),t&&n(tn),v(Qa),t&&n(Pg),t&&n(ie),v(Ra),v(Za),t&&n(yg),t&&n(on),v(Ba),t&&n(xg),t&&n(Z),v(Ya),v(ei),t&&n($g),t&&n(sn),v(ni),t&&n(Eg),t&&n(le),v(oi),v(ai),t&&n(qg),t&&n(dn),v(ii),t&&n(Ag),t&&n(de),v(li),v(pi),t&&n(Cg),t&&n(mn),v(mi),t&&n(Dg),t&&n(A),v(fi),v(hi),v(ui),v(Ro),v(gi),v(_i),v(vi),v(Ti),v(ki),v(Pi)}}}const l7={local:"pipelines",sections:[{local:"transformers.pipeline",title:"The pipeline abstraction"},{local:"pipeline-batching",title:"Pipeline batching"},{local:"pipeline-chunk-batching",title:"Pipeline chunk batching"},{local:"pipeline-custom-code",title:"Pipeline custom code"},{local:"implementing-a-pipeline",title:"Implementing a 
pipeline"},{local:"the-task-specific-pipelines",sections:[{local:"transformers.AudioClassificationPipeline",title:"AudioClassificationPipeline"},{local:"transformers.AutomaticSpeechRecognitionPipeline",title:"AutomaticSpeechRecognitionPipeline"},{local:"transformers.Conversation",title:"ConversationalPipeline"},{local:"transformers.DocumentQuestionAnsweringPipeline",title:"DocumentQuestionAnsweringPipeline"},{local:"transformers.FeatureExtractionPipeline",title:"FeatureExtractionPipeline"},{local:"transformers.FillMaskPipeline",title:"FillMaskPipeline"},{local:"transformers.ImageClassificationPipeline",title:"ImageClassificationPipeline"},{local:"transformers.ImageSegmentationPipeline",title:"ImageSegmentationPipeline"},{local:"transformers.ImageToTextPipeline",title:"ImageToTextPipeline"},{local:"transformers.TokenClassificationPipeline",title:"NerPipeline"},{local:"transformers.ObjectDetectionPipeline",title:"ObjectDetectionPipeline"},{local:"transformers.QuestionAnsweringPipeline",title:"QuestionAnsweringPipeline"},{local:"transformers.SummarizationPipeline",title:"SummarizationPipeline"},{local:"transformers.TableQuestionAnsweringPipeline",title:"TableQuestionAnsweringPipeline"},{local:"transformers.TextClassificationPipeline",title:"TextClassificationPipeline"},{local:"transformers.TextGenerationPipeline",title:"TextGenerationPipeline"},{local:"transformers.Text2TextGenerationPipeline",title:"Text2TextGenerationPipeline"},{local:"transformers.TokenClassificationPipeline",title:"TokenClassificationPipeline"},{local:"transformers.TranslationPipeline",title:"TranslationPipeline"},{local:"transformers.VisualQuestionAnsweringPipeline",title:"VisualQuestionAnsweringPipeline"},{local:"transformers.ZeroShotClassificationPipeline",title:"ZeroShotClassificationPipeline"},{local:"transformers.ZeroShotImageClassificationPipeline",title:"ZeroShotImageClassificationPipeline"},{local:"transformers.ZeroShotObjectDetectionPipeline",title:"ZeroShotObjectDetectionPipeline"}],title:"The task specific pipelines"},{local:"transformers.Pipeline",title:"Parent class: `Pipeline`"}],title:"Pipelines"};function d7(z){return BD(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class g7 extends VD{constructor(T){super();HD(this,T,d7,i7,WD,{})}}export{g7 as default,l7 as metadata};
21
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/processors.mdx-hf-doc-builder.js
import{S as cp,i as fp,s as mp,e as s,k as i,w as g,t as n,M as up,c as a,d as r,m as d,a as o,x as v,h as l,b as f,G as t,g as c,y as $,q as b,o as x,B as E,v as hp,L as vi}from"../../chunks/vendor-hf-doc-builder.js";import{T as _i}from"../../chunks/Tip-hf-doc-builder.js";import{D as I}from"../../chunks/Docstring-hf-doc-builder.js";import{C as As}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Oe}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as gi}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function _p(z){let u,y,_,h,P,m,w,V;return{c(){u=s("p"),y=n(`This class method is simply calling the feature extractor `),_=s("a"),h=n("from_pretrained()"),P=n(` and the tokenizer `),m=s("code"),w=n("~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),V=n(` methods. Please refer to the docstrings of the methods above for more information.`),this.h()},l(T){u=a(T,"P",{});var q=o(u);y=l(q,`This class method is simply calling the feature extractor `),_=a(q,"A",{href:!0});var C=o(_);h=l(C,"from_pretrained()"),C.forEach(r),P=l(q,` and the tokenizer `),m=a(q,"CODE",{});var K=o(m);w=l(K,"~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),K.forEach(r),V=l(q,` methods. Please refer to the docstrings of the methods above for more information.`),q.forEach(r),this.h()},h(){f(_,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained")},m(T,q){c(T,u,q),t(u,y),t(u,_),t(_,h),t(u,P),t(u,m),t(m,w),t(u,V)},d(T){T&&r(u)}}}function gp(z){let u,y,_,h,P;return h=new As({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("bert-base-cased") # Push the processor to your namespace with the name "my-finetuned-bert". processor.push_to_hub("my-finetuned-bert") # Push the processor to an organization with the name "my-finetuned-bert". processor.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot;.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the processor to an organization with the name &quot;my-finetuned-bert&quot;.</span> processor.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){u=s("p"),y=n("Examples:"),_=i(),g(h.$$.fragment)},l(m){u=a(m,"P",{});var w=o(u);y=l(w,"Examples:"),w.forEach(r),_=d(m),v(h.$$.fragment,m)},m(m,w){c(m,u,w),t(u,y),c(m,_,w),$(h,m,w),P=!0},p:vi,i(m){P||(b(h.$$.fragment,m),P=!0)},o(m){x(h.$$.fragment,m),P=!1},d(m){m&&r(u),m&&r(_),E(h,m)}}}function vp(z){let u,y;return{c(){u=s("p"),y=n("This API is experimental and may have some slight breaking changes in the next releases.")},l(_){u=a(_,"P",{});var h=o(u);y=l(h,"This API is experimental and may have some slight breaking changes in the next releases."),h.forEach(r)},m(_,h){c(_,u,h),t(u,y)},d(_){_&&r(u)}}}function $p(z){let u,y,_,h,P,m,w,V;return{c(){u=s("p"),y=n("This class method is simply calling "),_=s("a"),h=n("save_pretrained()"),P=n(` and `),m=s("code"),w=n("~tokenization_utils_base.PreTrainedTokenizer.save_pretrained"),V=n(`. 
Please refer to the docstrings of the methods above for more information.`),this.h()},l(T){u=a(T,"P",{});var q=o(u);y=l(q,"This class method is simply calling "),_=a(q,"A",{href:!0});var C=o(_);h=l(C,"save_pretrained()"),C.forEach(r),P=l(q,` and `),m=a(q,"CODE",{});var K=o(m);w=l(K,"~tokenization_utils_base.PreTrainedTokenizer.save_pretrained"),K.forEach(r),V=l(q,`. Please refer to the docstrings of the methods above for more information.`),q.forEach(r),this.h()},h(){f(_,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained")},m(T,q){c(T,u,q),t(u,y),t(u,_),t(_,h),t(u,P),t(u,m),t(m,w),t(u,V)},d(T){T&&r(u)}}}function bp(z){let u,y,_,h,P;return h=new As({props:{code:`import tensorflow_datasets as tfds dataset = tfds.load("squad") training_examples = get_examples_from_dataset(dataset, evaluate=False) evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow_datasets <span class="hljs-keyword">as</span> tfds <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>training_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>evaluation_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">True</span>)`}}),{c(){u=s("p"),y=n("Examples:"),_=i(),g(h.$$.fragment)},l(m){u=a(m,"P",{});var w=o(u);y=l(w,"Examples:"),w.forEach(r),_=d(m),v(h.$$.fragment,m)},m(m,w){c(m,u,w),t(u,y),c(m,_,w),$(h,m,w),P=!0},p:vi,i(m){P||(b(h.$$.fragment,m),P=!0)},o(m){x(h.$$.fragment,m),P=!1},d(m){m&&r(u),m&&r(_),E(h,m)}}}function xp(z){let u,y,_,h,P;return h=new As({props:{code:`processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, )`,highlighted:`processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),{c(){u=s("p"),y=n("Example:"),_=i(),g(h.$$.fragment)},l(m){u=a(m,"P",{});var w=o(u);y=l(w,"Example:"),w.forEach(r),_=d(m),v(h.$$.fragment,m)},m(m,w){c(m,u,w),t(u,y),c(m,_,w),$(h,m,w),P=!0},p:vi,i(m){P||(b(h.$$.fragment,m),P=!0)},o(m){x(h.$$.fragment,m),P=!1},d(m){m&&r(u),m&&r(_),E(h,m)}}}function Ep(z){let 
u,y,_,h,P,m,w,V,T,q,C,K,Ls,ie,Y,Ya,Ot,Za,eo,Qt,to,ro,so,gr,ao,Ds,Z,de,vr,Qe,oo,$r,no,Ts,Ut,lo,Ns,Ft,io,zs,L,Ue,po,br,co,fo,G,Fe,mo,xr,uo,ho,pe,_o,R,Ge,go,Re,vo,Er,$o,bo,xo,ce,Eo,H,He,wo,We,yo,wr,Po,qo,ko,fe,Io,W,Xe,So,Be,Ao,Gt,Lo,Do,To,me,Ms,ee,ue,yr,Je,No,Pr,zo,Vs,M,Mo,Rt,Vo,Co,Ht,jo,Oo,Wt,Qo,Uo,Xt,Fo,Go,Cs,S,Ke,Ro,qr,Ho,Wo,he,Ye,Xo,Ze,Bo,Bt,Jo,Ko,Yo,_e,et,Zo,kr,en,tn,ge,tt,rn,Ir,sn,an,ve,rt,on,st,nn,Jt,ln,dn,pn,$e,at,cn,ot,fn,Kt,mn,un,hn,be,nt,_n,Sr,gn,js,O,lt,vn,Ar,$n,bn,xe,it,xn,Lr,En,Os,Q,dt,wn,Dr,yn,Pn,Ee,pt,qn,Tr,kn,Qs,te,we,Nr,ct,In,zr,Sn,Us,ye,ft,An,Ln,mt,Dn,Fs,Yt,Tn,Gs,Zt,Nn,Rs,k,Mr,Vr,zn,Mn,Cr,jr,Vn,Cn,Or,Qr,jn,On,Ur,Fr,Qn,Un,Gr,Rr,Fn,Gn,Hr,Wr,Rn,Hn,Xr,Br,Wn,Xn,Jr,Kr,Bn,Jn,Yr,Zr,Kn,Hs,Pe,Yn,er,Zn,el,Ws,re,ut,tl,tr,rl,es,sl,Xs,se,qe,ts,ht,al,rs,ol,Bs,ae,_t,nl,ll,gt,ss,il,dl,Js,vt,pl,$t,cl,Ks,rr,fl,Ys,sr,as,os,ml,Zs,ar,ul,ea,ke,hl,bt,_l,gl,ta,oe,Ie,ns,xt,vl,ls,$l,ra,U,Et,bl,xl,wt,El,wl,yt,yl,Pl,sa,or,ql,aa,ne,Se,is,Pt,kl,ds,Il,oa,nr,Sl,na,Ae,ps,cs,Al,Ll,fs,ms,Dl,la,qt,Tl,us,Nl,ia,N,kt,zl,hs,Ml,Vl,Le,It,Cl,_s,jl,Ol,X,St,Ql,At,Ul,gs,Fl,Gl,Rl,De,Hl,Te,Lt,Wl,vs,Xl,da,Ne,Bl,$s,Jl,Kl,pa,F,Dt,Yl,bs,Zl,ei,ze,ca,Me,ti,xs,ri,si,fa,le,Ve,Es,Tt,ai,ws,oi,ma,lr,ni,ua,Nt,ha,Ce,li,ys,ii,di,_a,zt,ga,je,pi,Mt,ci,fi,va;return m=new Oe({}),Qe=new Oe({}),Ue=new I({props:{name:"class transformers.ProcessorMixin",anchor:"transformers.ProcessorMixin",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L43"}}),Fe=new I({props:{name:"from_pretrained",anchor:"transformers.ProcessorMixin.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>~tokenization_utils_base.PreTrainedTokenizer.from_pretrained</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L152"}}),pe=new _i({props:{$$slots:{default:[_p]},$$scope:{ctx:z}}}),Ge=new I({props:{name:"push_to_hub",anchor:"transformers.ProcessorMixin.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.ProcessorMixin.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your processor to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.ProcessorMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.ProcessorMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload processor&quot;</code>.`,name:"commit_message"},{anchor:"transformers.ProcessorMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.ProcessorMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.ProcessorMixin.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.ProcessorMixin.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),ce=new gi({props:{anchor:"transformers.ProcessorMixin.push_to_hub.example",$$slots:{default:[gp]},$$scope:{ctx:z}}}),He=new I({props:{name:"register_for_auto_class",anchor:"transformers.ProcessorMixin.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoProcessor'"}],parametersDescription:[{anchor:"transformers.ProcessorMixin.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoProcessor&quot;</code>) &#x2014; The auto class to register this new feature extractor with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L185"}}),fe=new _i({props:{warning:!0,$$slots:{default:[vp]},$$scope:{ctx:z}}}),Xe=new I({props:{name:"save_pretrained",anchor:"transformers.ProcessorMixin.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ProcessorMixin.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.ProcessorMixin.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). 
kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L94"}}),me=new _i({props:{$$slots:{default:[$p]},$$scope:{ctx:z}}}),Je=new Oe({}),Ke=new I({props:{name:"class transformers.DataProcessor",anchor:"transformers.DataProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L80"}}),Ye=new I({props:{name:"get_dev_examples",anchor:"transformers.DataProcessor.get_dev_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L97"}}),et=new I({props:{name:"get_example_from_tensor_dict",anchor:"transformers.DataProcessor.get_example_from_tensor_dict",parameters:[{name:"tensor_dict",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L83"}}),tt=new I({props:{name:"get_labels",anchor:"transformers.DataProcessor.get_labels",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L105"}}),rt=new I({props:{name:"get_test_examples",anchor:"transformers.DataProcessor.get_test_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L101"}}),at=new I({props:{name:"get_train_examples",anchor:"transformers.DataProcessor.get_train_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L93"}}),nt=new I({props:{name:"tfds_map",anchor:"transformers.DataProcessor.tfds_map",parameters:[{name:"example",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L109"}}),lt=new I({props:{name:"class transformers.InputExample",anchor:"transformers.InputExample",parameters:[{name:"guid",val:": str"},{name:"text_a",val:": str"},{name:"text_b",val:": typing.Optional[str] = None"},{name:"label",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L30"}}),it=new I({props:{name:"to_json_string",anchor:"transformers.InputExample.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L49"}}),dt=new I({props:{name:"class transformers.InputFeatures",anchor:"transformers.InputFeatures",parameters:[{name:"input_ids",val:": typing.List[int]"},{name:"attention_mask",val:": typing.Optional[typing.List[int]] = None"},{name:"token_type_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"label",val:": typing.Union[int, float, NoneType] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L55"}}),pt=new I({props:{name:"to_json_string",anchor:"transformers.InputFeatures.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L75"}}),ct=new Oe({}),ut=new 
I({props:{name:"transformers.glue_convert_examples_to_features",anchor:"transformers.glue_convert_examples_to_features",parameters:[{name:"examples",val:": typing.Union[typing.List[transformers.data.processors.utils.InputExample], ForwardRef('tf.data.Dataset')]"},{name:"tokenizer",val:": PreTrainedTokenizer"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"task",val:" = None"},{name:"label_list",val:" = None"},{name:"output_mode",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/glue.py#L41",returnDescription:` <p>If the <code>examples</code> input is a <code>tf.data.Dataset</code>, will return a <code>tf.data.Dataset</code> containing the task-specific features. If the input is a list of <code>InputExamples</code>, will return a list of task-specific <code>InputFeatures</code> which can be fed to the model.</p> `}}),ht=new Oe({}),xt=new Oe({}),Pt=new Oe({}),kt=new I({props:{name:"class transformers.data.processors.squad.SquadProcessor",anchor:"transformers.data.processors.squad.SquadProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L542"}}),It=new I({props:{name:"get_dev_examples",anchor:"transformers.data.processors.squad.SquadProcessor.get_dev_examples",parameters:[{name:"data_dir",val:""},{name:"filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L630"}}),St=new I({props:{name:"get_examples_from_dataset",anchor:"transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset",parameters:[{name:"dataset",val:""},{name:"evaluate",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L575",returnDescription:` <p>List of SquadExample</p> `}}),De=new gi({props:{anchor:"transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset.example",$$slots:{default:[bp]},$$scope:{ctx:z}}}),Lt=new I({props:{name:"get_train_examples",anchor:"transformers.data.processors.squad.SquadProcessor.get_train_examples",parameters:[{name:"data_dir",val:""},{name:"filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L608"}}),Dt=new I({props:{name:"transformers.squad_convert_examples_to_features",anchor:"transformers.squad_convert_examples_to_features",parameters:[{name:"examples",val:""},{name:"tokenizer",val:""},{name:"max_seq_length",val:""},{name:"doc_stride",val:""},{name:"max_query_length",val:""},{name:"is_training",val:""},{name:"padding_strategy",val:" = 'max_length'"},{name:"return_dataset",val:" = False"},{name:"threads",val:" = 1"},{name:"tqdm_enabled",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L317",returnDescription:` <p>list of <code>SquadFeatures</code></p> `}}),ze=new gi({props:{anchor:"transformers.squad_convert_examples_to_features.example",$$slots:{default:[xp]},$$scope:{ctx:z}}}),Tt=new Oe({}),Nt=new As({props:{code:`# Loading a V2 processor processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) # Loading a V1 processor processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, 
max_query_length=max_query_length, is_training=not evaluate, )`,highlighted:`<span class="hljs-comment"># Loading a V2 processor</span> processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) <span class="hljs-comment"># Loading a V1 processor</span> processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),zt=new As({props:{code:`# tensorflow_datasets only handle Squad V1. tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, )`,highlighted:`<span class="hljs-comment"># tensorflow_datasets only handle Squad V1.</span> tfds_examples = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),{c(){u=s("meta"),y=i(),_=s("h1"),h=s("a"),P=s("span"),g(m.$$.fragment),w=i(),V=s("span"),T=n("Processors"),q=i(),C=s("p"),K=n("Processors can mean two different things in the Transformers library:"),Ls=i(),ie=s("ul"),Y=s("li"),Ya=n("the objects that pre-process inputs for multi-modal models such as "),Ot=s("a"),Za=n("Wav2Vec2"),eo=n(` (speech and text) or `),Qt=s("a"),to=n("CLIP"),ro=n(" (text and vision)"),so=i(),gr=s("li"),ao=n("deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD."),Ds=i(),Z=s("h2"),de=s("a"),vr=s("span"),g(Qe.$$.fragment),oo=i(),$r=s("span"),no=n("Multi-modal processors"),Ts=i(),Ut=s("p"),lo=n(`Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).`),Ns=i(),Ft=s("p"),io=n("Those processors inherit from the following base class that implements the saving and loading functionality:"),zs=i(),L=s("div"),g(Ue.$$.fragment),po=i(),br=s("p"),co=n("This is a mixin used to provide saving/loading functionality for all processor classes."),fo=i(),G=s("div"),g(Fe.$$.fragment),mo=i(),xr=s("p"),uo=n("Instantiate a processor associated with a pretrained model."),ho=i(),g(pe.$$.fragment),_o=i(),R=s("div"),g(Ge.$$.fragment),go=i(),Re=s("p"),vo=n(`Upload the processor files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Er=s("code"),$o=n("repo_path_or_name"),bo=n("."),xo=i(),g(ce.$$.fragment),Eo=i(),H=s("div"),g(He.$$.fragment),wo=i(),We=s("p"),yo=n(`Register this class with a given auto class. 
This should only be used for custom feature extractors as the ones in the library are already mapped with `),wr=s("code"),Po=n("AutoProcessor"),qo=n("."),ko=i(),g(fe.$$.fragment),Io=i(),W=s("div"),g(Xe.$$.fragment),So=i(),Be=s("p"),Ao=n(`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Gt=s("a"),Lo=n("from_pretrained()"),Do=n(" method."),To=i(),g(me.$$.fragment),Ms=i(),ee=s("h2"),ue=s("a"),yr=s("span"),g(Je.$$.fragment),No=i(),Pr=s("span"),zo=n("Deprecated processors"),Vs=i(),M=s("p"),Mo=n(`All processors follow the same architecture which is that of the `),Rt=s("a"),Vo=n("DataProcessor"),Co=n(`. The processor returns a list of `),Ht=s("a"),jo=n("InputExample"),Oo=n(`. These `),Wt=s("a"),Qo=n("InputExample"),Uo=n(` can be converted to `),Xt=s("a"),Fo=n("InputFeatures"),Go=n(" in order to be fed to the model."),Cs=i(),S=s("div"),g(Ke.$$.fragment),Ro=i(),qr=s("p"),Ho=n("Base class for data converters for sequence classification data sets."),Wo=i(),he=s("div"),g(Ye.$$.fragment),Xo=i(),Ze=s("p"),Bo=n("Gets a collection of "),Bt=s("a"),Jo=n("InputExample"),Ko=n(" for the dev set."),Yo=i(),_e=s("div"),g(et.$$.fragment),Zo=i(),kr=s("p"),en=n("Gets an example from a dict with tensorflow tensors."),tn=i(),ge=s("div"),g(tt.$$.fragment),rn=i(),Ir=s("p"),sn=n("Gets the list of labels for this data set."),an=i(),ve=s("div"),g(rt.$$.fragment),on=i(),st=s("p"),nn=n("Gets a collection of "),Jt=s("a"),ln=n("InputExample"),dn=n(" for the test set."),pn=i(),$e=s("div"),g(at.$$.fragment),cn=i(),ot=s("p"),fn=n("Gets a collection of "),Kt=s("a"),mn=n("InputExample"),un=n(" for the train set."),hn=i(),be=s("div"),g(nt.$$.fragment),_n=i(),Sr=s("p"),gn=n(`Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts examples to the correct format.`),js=i(),O=s("div"),g(lt.$$.fragment),vn=i(),Ar=s("p"),$n=n("A single training/test example for simple sequence classification."),bn=i(),xe=s("div"),g(it.$$.fragment),xn=i(),Lr=s("p"),En=n("Serializes this instance to a JSON string."),Os=i(),Q=s("div"),g(dt.$$.fragment),wn=i(),Dr=s("p"),yn=n("A single set of features of data. Property names are the same names as the corresponding inputs to a model."),Pn=i(),Ee=s("div"),g(pt.$$.fragment),qn=i(),Tr=s("p"),kn=n("Serializes this instance to a JSON string."),Qs=i(),te=s("h2"),we=s("a"),Nr=s("span"),g(ct.$$.fragment),In=i(),zr=s("span"),Sn=n("GLUE"),Us=i(),ye=s("p"),ft=s("a"),An=n("General Language Understanding Evaluation (GLUE)"),Ln=n(` is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. 
It was released together with the paper `),mt=s("a"),Dn=n(`GLUE: A multi-task benchmark and analysis platform for natural language understanding`),Fs=i(),Yt=s("p"),Tn=n(`This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.`),Gs=i(),Zt=s("p"),Nn=n("Those processors are:"),Rs=i(),k=s("ul"),Mr=s("li"),Vr=s("code"),zn=n("~data.processors.utils.MrpcProcessor"),Mn=i(),Cr=s("li"),jr=s("code"),Vn=n("~data.processors.utils.MnliProcessor"),Cn=i(),Or=s("li"),Qr=s("code"),jn=n("~data.processors.utils.MnliMismatchedProcessor"),On=i(),Ur=s("li"),Fr=s("code"),Qn=n("~data.processors.utils.Sst2Processor"),Un=i(),Gr=s("li"),Rr=s("code"),Fn=n("~data.processors.utils.StsbProcessor"),Gn=i(),Hr=s("li"),Wr=s("code"),Rn=n("~data.processors.utils.QqpProcessor"),Hn=i(),Xr=s("li"),Br=s("code"),Wn=n("~data.processors.utils.QnliProcessor"),Xn=i(),Jr=s("li"),Kr=s("code"),Bn=n("~data.processors.utils.RteProcessor"),Jn=i(),Yr=s("li"),Zr=s("code"),Kn=n("~data.processors.utils.WnliProcessor"),Hs=i(),Pe=s("p"),Yn=n(`Additionally, the following method can be used to load values from a data file and convert them to a list of `),er=s("a"),Zn=n("InputExample"),el=n("."),Ws=i(),re=s("div"),g(ut.$$.fragment),tl=i(),tr=s("p"),rl=n("Loads a data file into a list of "),es=s("code"),sl=n("InputFeatures"),Xs=i(),se=s("h2"),qe=s("a"),ts=s("span"),g(ht.$$.fragment),al=i(),rs=s("span"),ol=n("XNLI"),Bs=i(),ae=s("p"),_t=s("a"),nl=n("The Cross-Lingual NLI Corpus (XNLI)"),ll=n(` is a benchmark that evaluates the quality of cross-lingual text representations. XNLI is crowd-sourced dataset based on `),gt=s("a"),ss=s("em"),il=n("MultiNLI"),dl=n(`: pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili).`),Js=i(),vt=s("p"),pl=n("It was released together with the paper "),$t=s("a"),cl=n("XNLI: Evaluating Cross-lingual Sentence Representations"),Ks=i(),rr=s("p"),fl=n("This library hosts the processor to load the XNLI data:"),Ys=i(),sr=s("ul"),as=s("li"),os=s("code"),ml=n("~data.processors.utils.XnliProcessor"),Zs=i(),ar=s("p"),ul=n("Please note that since the gold labels are available on the test set, evaluation is performed on the test set."),ea=i(),ke=s("p"),hl=n("An example using these processors is given in the "),bt=s("a"),_l=n("run_xnli.py"),gl=n(" script."),ta=i(),oe=s("h2"),Ie=s("a"),ns=s("span"),g(xt.$$.fragment),vl=i(),ls=s("span"),$l=n("SQuAD"),ra=i(),U=s("p"),Et=s("a"),bl=n("The Stanford Question Answering Dataset (SQuAD)"),xl=n(` is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper `),wt=s("a"),El=n("SQuAD: 100,000+ Questions for Machine Comprehension of Text"),wl=n(". 
The second version (v2.0) was released alongside the paper "),yt=s("a"),yl=n(`Know What You Don\u2019t Know: Unanswerable Questions for SQuAD`),Pl=n("."),sa=i(),or=s("p"),ql=n("This library hosts a processor for each of the two versions:"),aa=i(),ne=s("h3"),Se=s("a"),is=s("span"),g(Pt.$$.fragment),kl=i(),ds=s("span"),Il=n("Processors"),oa=i(),nr=s("p"),Sl=n("Those processors are:"),na=i(),Ae=s("ul"),ps=s("li"),cs=s("code"),Al=n("~data.processors.utils.SquadV1Processor"),Ll=i(),fs=s("li"),ms=s("code"),Dl=n("~data.processors.utils.SquadV2Processor"),la=i(),qt=s("p"),Tl=n("They both inherit from the abstract class "),us=s("code"),Nl=n("~data.processors.utils.SquadProcessor"),ia=i(),N=s("div"),g(kt.$$.fragment),zl=i(),hs=s("p"),Ml=n(`Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.`),Vl=i(),Le=s("div"),g(It.$$.fragment),Cl=i(),_s=s("p"),jl=n("Returns the evaluation example from the data directory."),Ol=i(),X=s("div"),g(St.$$.fragment),Ql=i(),At=s("p"),Ul=n("Creates a list of "),gs=s("code"),Fl=n("SquadExample"),Gl=n(" using a TFDS dataset."),Rl=i(),g(De.$$.fragment),Hl=i(),Te=s("div"),g(Lt.$$.fragment),Wl=i(),vs=s("p"),Xl=n("Returns the training examples from the data directory."),da=i(),Ne=s("p"),Bl=n(`Additionally, the following method can be used to convert SQuAD examples into `),$s=s("code"),Jl=n("~data.processors.utils.SquadFeatures"),Kl=n(" that can be used as model inputs."),pa=i(),F=s("div"),g(Dt.$$.fragment),Yl=i(),bs=s("p"),Zl=n(`Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer\u2019s features to create the model\u2019s inputs.`),ei=i(),g(ze.$$.fragment),ca=i(),Me=s("p"),ti=n(`These processors as well as the aforementionned method can be used with files containing the data as well as with the `),xs=s("em"),ri=n("tensorflow_datasets"),si=n(" package. 
Examples are given below."),fa=i(),le=s("h3"),Ve=s("a"),Es=s("span"),g(Tt.$$.fragment),ai=i(),ws=s("span"),oi=n("Example usage"),ma=i(),lr=s("p"),ni=n("Here is an example using the processors as well as the conversion method using data files:"),ua=i(),g(Nt.$$.fragment),ha=i(),Ce=s("p"),li=n("Using "),ys=s("em"),ii=n("tensorflow_datasets"),di=n(" is as easy as using a data file:"),_a=i(),g(zt.$$.fragment),ga=i(),je=s("p"),pi=n("Another example using these processors is given in the "),Mt=s("a"),ci=n("run_squad.py"),fi=n(" script."),this.h()},l(e){const p=up('[data-svelte="svelte-1phssyn"]',document.head);u=a(p,"META",{name:!0,content:!0}),p.forEach(r),y=d(e),_=a(e,"H1",{class:!0});var Vt=o(_);h=a(Vt,"A",{id:!0,class:!0,href:!0});var Ps=o(h);P=a(Ps,"SPAN",{});var qs=o(P);v(m.$$.fragment,qs),qs.forEach(r),Ps.forEach(r),w=d(Vt),V=a(Vt,"SPAN",{});var ks=o(V);T=l(ks,"Processors"),ks.forEach(r),Vt.forEach(r),q=d(e),C=a(e,"P",{});var Is=o(C);K=l(Is,"Processors can mean two different things in the Transformers library:"),Is.forEach(r),Ls=d(e),ie=a(e,"UL",{});var Ct=o(ie);Y=a(Ct,"LI",{});var ir=o(Y);Ya=l(ir,"the objects that pre-process inputs for multi-modal models such as "),Ot=a(ir,"A",{href:!0});var $i=o(Ot);Za=l($i,"Wav2Vec2"),$i.forEach(r),eo=l(ir,` (speech and text) or `),Qt=a(ir,"A",{href:!0});var bi=o(Qt);to=l(bi,"CLIP"),bi.forEach(r),ro=l(ir," (text and vision)"),ir.forEach(r),so=d(Ct),gr=a(Ct,"LI",{});var xi=o(gr);ao=l(xi,"deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD."),xi.forEach(r),Ct.forEach(r),Ds=d(e),Z=a(e,"H2",{class:!0});var $a=o(Z);de=a($a,"A",{id:!0,class:!0,href:!0});var Ei=o(de);vr=a(Ei,"SPAN",{});var wi=o(vr);v(Qe.$$.fragment,wi),wi.forEach(r),Ei.forEach(r),oo=d($a),$r=a($a,"SPAN",{});var yi=o($r);no=l(yi,"Multi-modal processors"),yi.forEach(r),$a.forEach(r),Ts=d(e),Ut=a(e,"P",{});var Pi=o(Ut);lo=l(Pi,`Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).`),Pi.forEach(r),Ns=d(e),Ft=a(e,"P",{});var qi=o(Ft);io=l(qi,"Those processors inherit from the following base class that implements the saving and loading functionality:"),qi.forEach(r),zs=d(e),L=a(e,"DIV",{class:!0});var j=o(L);v(Ue.$$.fragment,j),po=d(j),br=a(j,"P",{});var ki=o(br);co=l(ki,"This is a mixin used to provide saving/loading functionality for all processor classes."),ki.forEach(r),fo=d(j),G=a(j,"DIV",{class:!0});var dr=o(G);v(Fe.$$.fragment,dr),mo=d(dr),xr=a(dr,"P",{});var Ii=o(xr);uo=l(Ii,"Instantiate a processor associated with a pretrained model."),Ii.forEach(r),ho=d(dr),v(pe.$$.fragment,dr),dr.forEach(r),_o=d(j),R=a(j,"DIV",{class:!0});var pr=o(R);v(Ge.$$.fragment,pr),go=d(pr),Re=a(pr,"P",{});var ba=o(Re);vo=l(ba,`Upload the processor files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Er=a(ba,"CODE",{});var Si=o(Er);$o=l(Si,"repo_path_or_name"),Si.forEach(r),bo=l(ba,"."),ba.forEach(r),xo=d(pr),v(ce.$$.fragment,pr),pr.forEach(r),Eo=d(j),H=a(j,"DIV",{class:!0});var cr=o(H);v(He.$$.fragment,cr),wo=d(cr),We=a(cr,"P",{});var xa=o(We);yo=l(xa,`Register this class with a given auto class. 
This should only be used for custom feature extractors as the ones in the library are already mapped with `),wr=a(xa,"CODE",{});var Ai=o(wr);Po=l(Ai,"AutoProcessor"),Ai.forEach(r),qo=l(xa,"."),xa.forEach(r),ko=d(cr),v(fe.$$.fragment,cr),cr.forEach(r),Io=d(j),W=a(j,"DIV",{class:!0});var fr=o(W);v(Xe.$$.fragment,fr),So=d(fr),Be=a(fr,"P",{});var Ea=o(Be);Ao=l(Ea,`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Gt=a(Ea,"A",{href:!0});var Li=o(Gt);Lo=l(Li,"from_pretrained()"),Li.forEach(r),Do=l(Ea," method."),Ea.forEach(r),To=d(fr),v(me.$$.fragment,fr),fr.forEach(r),j.forEach(r),Ms=d(e),ee=a(e,"H2",{class:!0});var wa=o(ee);ue=a(wa,"A",{id:!0,class:!0,href:!0});var Di=o(ue);yr=a(Di,"SPAN",{});var Ti=o(yr);v(Je.$$.fragment,Ti),Ti.forEach(r),Di.forEach(r),No=d(wa),Pr=a(wa,"SPAN",{});var Ni=o(Pr);zo=l(Ni,"Deprecated processors"),Ni.forEach(r),wa.forEach(r),Vs=d(e),M=a(e,"P",{});var B=o(M);Mo=l(B,`All processors follow the same architecture which is that of the `),Rt=a(B,"A",{href:!0});var zi=o(Rt);Vo=l(zi,"DataProcessor"),zi.forEach(r),Co=l(B,`. The processor returns a list of `),Ht=a(B,"A",{href:!0});var Mi=o(Ht);jo=l(Mi,"InputExample"),Mi.forEach(r),Oo=l(B,`. These `),Wt=a(B,"A",{href:!0});var Vi=o(Wt);Qo=l(Vi,"InputExample"),Vi.forEach(r),Uo=l(B,` can be converted to `),Xt=a(B,"A",{href:!0});var Ci=o(Xt);Fo=l(Ci,"InputFeatures"),Ci.forEach(r),Go=l(B," in order to be fed to the model."),B.forEach(r),Cs=d(e),S=a(e,"DIV",{class:!0});var D=o(S);v(Ke.$$.fragment,D),Ro=d(D),qr=a(D,"P",{});var ji=o(qr);Ho=l(ji,"Base class for data converters for sequence classification data sets."),ji.forEach(r),Wo=d(D),he=a(D,"DIV",{class:!0});var ya=o(he);v(Ye.$$.fragment,ya),Xo=d(ya),Ze=a(ya,"P",{});var Pa=o(Ze);Bo=l(Pa,"Gets a collection of "),Bt=a(Pa,"A",{href:!0});var Oi=o(Bt);Jo=l(Oi,"InputExample"),Oi.forEach(r),Ko=l(Pa," for the dev set."),Pa.forEach(r),ya.forEach(r),Yo=d(D),_e=a(D,"DIV",{class:!0});var qa=o(_e);v(et.$$.fragment,qa),Zo=d(qa),kr=a(qa,"P",{});var Qi=o(kr);en=l(Qi,"Gets an example from a dict with tensorflow tensors."),Qi.forEach(r),qa.forEach(r),tn=d(D),ge=a(D,"DIV",{class:!0});var ka=o(ge);v(tt.$$.fragment,ka),rn=d(ka),Ir=a(ka,"P",{});var Ui=o(Ir);sn=l(Ui,"Gets the list of labels for this data set."),Ui.forEach(r),ka.forEach(r),an=d(D),ve=a(D,"DIV",{class:!0});var Ia=o(ve);v(rt.$$.fragment,Ia),on=d(Ia),st=a(Ia,"P",{});var Sa=o(st);nn=l(Sa,"Gets a collection of "),Jt=a(Sa,"A",{href:!0});var Fi=o(Jt);ln=l(Fi,"InputExample"),Fi.forEach(r),dn=l(Sa," for the test set."),Sa.forEach(r),Ia.forEach(r),pn=d(D),$e=a(D,"DIV",{class:!0});var Aa=o($e);v(at.$$.fragment,Aa),cn=d(Aa),ot=a(Aa,"P",{});var La=o(ot);fn=l(La,"Gets a collection of "),Kt=a(La,"A",{href:!0});var Gi=o(Kt);mn=l(Gi,"InputExample"),Gi.forEach(r),un=l(La," for the train set."),La.forEach(r),Aa.forEach(r),hn=d(D),be=a(D,"DIV",{class:!0});var Da=o(be);v(nt.$$.fragment,Da),_n=d(Da),Sr=a(Da,"P",{});var Ri=o(Sr);gn=l(Ri,`Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
This method converts examples to the correct format.`),Ri.forEach(r),Da.forEach(r),D.forEach(r),js=d(e),O=a(e,"DIV",{class:!0});var mr=o(O);v(lt.$$.fragment,mr),vn=d(mr),Ar=a(mr,"P",{});var Hi=o(Ar);$n=l(Hi,"A single training/test example for simple sequence classification."),Hi.forEach(r),bn=d(mr),xe=a(mr,"DIV",{class:!0});var Ta=o(xe);v(it.$$.fragment,Ta),xn=d(Ta),Lr=a(Ta,"P",{});var Wi=o(Lr);En=l(Wi,"Serializes this instance to a JSON string."),Wi.forEach(r),Ta.forEach(r),mr.forEach(r),Os=d(e),Q=a(e,"DIV",{class:!0});var ur=o(Q);v(dt.$$.fragment,ur),wn=d(ur),Dr=a(ur,"P",{});var Xi=o(Dr);yn=l(Xi,"A single set of features of data. Property names are the same names as the corresponding inputs to a model."),Xi.forEach(r),Pn=d(ur),Ee=a(ur,"DIV",{class:!0});var Na=o(Ee);v(pt.$$.fragment,Na),qn=d(Na),Tr=a(Na,"P",{});var Bi=o(Tr);kn=l(Bi,"Serializes this instance to a JSON string."),Bi.forEach(r),Na.forEach(r),ur.forEach(r),Qs=d(e),te=a(e,"H2",{class:!0});var za=o(te);we=a(za,"A",{id:!0,class:!0,href:!0});var Ji=o(we);Nr=a(Ji,"SPAN",{});var Ki=o(Nr);v(ct.$$.fragment,Ki),Ki.forEach(r),Ji.forEach(r),In=d(za),zr=a(za,"SPAN",{});var Yi=o(zr);Sn=l(Yi,"GLUE"),Yi.forEach(r),za.forEach(r),Us=d(e),ye=a(e,"P",{});var Ma=o(ye);ft=a(Ma,"A",{href:!0,rel:!0});var Zi=o(ft);An=l(Zi,"General Language Understanding Evaluation (GLUE)"),Zi.forEach(r),Ln=l(Ma,` is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. It was released together with the paper `),mt=a(Ma,"A",{href:!0,rel:!0});var ed=o(mt);Dn=l(ed,`GLUE: A multi-task benchmark and analysis platform for natural language understanding`),ed.forEach(r),Ma.forEach(r),Fs=d(e),Yt=a(e,"P",{});var td=o(Yt);Tn=l(td,`This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.`),td.forEach(r),Gs=d(e),Zt=a(e,"P",{});var rd=o(Zt);Nn=l(rd,"Those processors are:"),rd.forEach(r),Rs=d(e),k=a(e,"UL",{});var A=o(k);Mr=a(A,"LI",{});var sd=o(Mr);Vr=a(sd,"CODE",{});var ad=o(Vr);zn=l(ad,"~data.processors.utils.MrpcProcessor"),ad.forEach(r),sd.forEach(r),Mn=d(A),Cr=a(A,"LI",{});var od=o(Cr);jr=a(od,"CODE",{});var nd=o(jr);Vn=l(nd,"~data.processors.utils.MnliProcessor"),nd.forEach(r),od.forEach(r),Cn=d(A),Or=a(A,"LI",{});var ld=o(Or);Qr=a(ld,"CODE",{});var id=o(Qr);jn=l(id,"~data.processors.utils.MnliMismatchedProcessor"),id.forEach(r),ld.forEach(r),On=d(A),Ur=a(A,"LI",{});var dd=o(Ur);Fr=a(dd,"CODE",{});var pd=o(Fr);Qn=l(pd,"~data.processors.utils.Sst2Processor"),pd.forEach(r),dd.forEach(r),Un=d(A),Gr=a(A,"LI",{});var cd=o(Gr);Rr=a(cd,"CODE",{});var fd=o(Rr);Fn=l(fd,"~data.processors.utils.StsbProcessor"),fd.forEach(r),cd.forEach(r),Gn=d(A),Hr=a(A,"LI",{});var md=o(Hr);Wr=a(md,"CODE",{});var ud=o(Wr);Rn=l(ud,"~data.processors.utils.QqpProcessor"),ud.forEach(r),md.forEach(r),Hn=d(A),Xr=a(A,"LI",{});var hd=o(Xr);Br=a(hd,"CODE",{});var _d=o(Br);Wn=l(_d,"~data.processors.utils.QnliProcessor"),_d.forEach(r),hd.forEach(r),Xn=d(A),Jr=a(A,"LI",{});var gd=o(Jr);Kr=a(gd,"CODE",{});var vd=o(Kr);Bn=l(vd,"~data.processors.utils.RteProcessor"),vd.forEach(r),gd.forEach(r),Jn=d(A),Yr=a(A,"LI",{});var $d=o(Yr);Zr=a($d,"CODE",{});var bd=o(Zr);Kn=l(bd,"~data.processors.utils.WnliProcessor"),bd.forEach(r),$d.forEach(r),A.forEach(r),Hs=d(e),Pe=a(e,"P",{});var Va=o(Pe);Yn=l(Va,`Additionally, the following method can be used to load values from a data file and convert them to a list of `),er=a(Va,"A",{href:!0});var 
xd=o(er);Zn=l(xd,"InputExample"),xd.forEach(r),el=l(Va,"."),Va.forEach(r),Ws=d(e),re=a(e,"DIV",{class:!0});var Ca=o(re);v(ut.$$.fragment,Ca),tl=d(Ca),tr=a(Ca,"P",{});var mi=o(tr);rl=l(mi,"Loads a data file into a list of "),es=a(mi,"CODE",{});var Ed=o(es);sl=l(Ed,"InputFeatures"),Ed.forEach(r),mi.forEach(r),Ca.forEach(r),Xs=d(e),se=a(e,"H2",{class:!0});var ja=o(se);qe=a(ja,"A",{id:!0,class:!0,href:!0});var wd=o(qe);ts=a(wd,"SPAN",{});var yd=o(ts);v(ht.$$.fragment,yd),yd.forEach(r),wd.forEach(r),al=d(ja),rs=a(ja,"SPAN",{});var Pd=o(rs);ol=l(Pd,"XNLI"),Pd.forEach(r),ja.forEach(r),Bs=d(e),ae=a(e,"P",{});var Ss=o(ae);_t=a(Ss,"A",{href:!0,rel:!0});var qd=o(_t);nl=l(qd,"The Cross-Lingual NLI Corpus (XNLI)"),qd.forEach(r),ll=l(Ss,` is a benchmark that evaluates the quality of cross-lingual text representations. XNLI is crowd-sourced dataset based on `),gt=a(Ss,"A",{href:!0,rel:!0});var kd=o(gt);ss=a(kd,"EM",{});var Id=o(ss);il=l(Id,"MultiNLI"),Id.forEach(r),kd.forEach(r),dl=l(Ss,`: pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili).`),Ss.forEach(r),Js=d(e),vt=a(e,"P",{});var ui=o(vt);pl=l(ui,"It was released together with the paper "),$t=a(ui,"A",{href:!0,rel:!0});var Sd=o($t);cl=l(Sd,"XNLI: Evaluating Cross-lingual Sentence Representations"),Sd.forEach(r),ui.forEach(r),Ks=d(e),rr=a(e,"P",{});var Ad=o(rr);fl=l(Ad,"This library hosts the processor to load the XNLI data:"),Ad.forEach(r),Ys=d(e),sr=a(e,"UL",{});var Ld=o(sr);as=a(Ld,"LI",{});var Dd=o(as);os=a(Dd,"CODE",{});var Td=o(os);ml=l(Td,"~data.processors.utils.XnliProcessor"),Td.forEach(r),Dd.forEach(r),Ld.forEach(r),Zs=d(e),ar=a(e,"P",{});var Nd=o(ar);ul=l(Nd,"Please note that since the gold labels are available on the test set, evaluation is performed on the test set."),Nd.forEach(r),ea=d(e),ke=a(e,"P",{});var Oa=o(ke);hl=l(Oa,"An example using these processors is given in the "),bt=a(Oa,"A",{href:!0,rel:!0});var zd=o(bt);_l=l(zd,"run_xnli.py"),zd.forEach(r),gl=l(Oa," script."),Oa.forEach(r),ta=d(e),oe=a(e,"H2",{class:!0});var Qa=o(oe);Ie=a(Qa,"A",{id:!0,class:!0,href:!0});var Md=o(Ie);ns=a(Md,"SPAN",{});var Vd=o(ns);v(xt.$$.fragment,Vd),Vd.forEach(r),Md.forEach(r),vl=d(Qa),ls=a(Qa,"SPAN",{});var Cd=o(ls);$l=l(Cd,"SQuAD"),Cd.forEach(r),Qa.forEach(r),ra=d(e),U=a(e,"P",{});var jt=o(U);Et=a(jt,"A",{href:!0,rel:!0});var jd=o(Et);bl=l(jd,"The Stanford Question Answering Dataset (SQuAD)"),jd.forEach(r),xl=l(jt,` is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper `),wt=a(jt,"A",{href:!0,rel:!0});var Od=o(wt);El=l(Od,"SQuAD: 100,000+ Questions for Machine Comprehension of Text"),Od.forEach(r),wl=l(jt,". 
The second version (v2.0) was released alongside the paper "),yt=a(jt,"A",{href:!0,rel:!0});var Qd=o(yt);yl=l(Qd,`Know What You Don\u2019t Know: Unanswerable Questions for SQuAD`),Qd.forEach(r),Pl=l(jt,"."),jt.forEach(r),sa=d(e),or=a(e,"P",{});var Ud=o(or);ql=l(Ud,"This library hosts a processor for each of the two versions:"),Ud.forEach(r),aa=d(e),ne=a(e,"H3",{class:!0});var Ua=o(ne);Se=a(Ua,"A",{id:!0,class:!0,href:!0});var Fd=o(Se);is=a(Fd,"SPAN",{});var Gd=o(is);v(Pt.$$.fragment,Gd),Gd.forEach(r),Fd.forEach(r),kl=d(Ua),ds=a(Ua,"SPAN",{});var Rd=o(ds);Il=l(Rd,"Processors"),Rd.forEach(r),Ua.forEach(r),oa=d(e),nr=a(e,"P",{});var Hd=o(nr);Sl=l(Hd,"Those processors are:"),Hd.forEach(r),na=d(e),Ae=a(e,"UL",{});var Fa=o(Ae);ps=a(Fa,"LI",{});var Wd=o(ps);cs=a(Wd,"CODE",{});var Xd=o(cs);Al=l(Xd,"~data.processors.utils.SquadV1Processor"),Xd.forEach(r),Wd.forEach(r),Ll=d(Fa),fs=a(Fa,"LI",{});var Bd=o(fs);ms=a(Bd,"CODE",{});var Jd=o(ms);Dl=l(Jd,"~data.processors.utils.SquadV2Processor"),Jd.forEach(r),Bd.forEach(r),Fa.forEach(r),la=d(e),qt=a(e,"P",{});var hi=o(qt);Tl=l(hi,"They both inherit from the abstract class "),us=a(hi,"CODE",{});var Kd=o(us);Nl=l(Kd,"~data.processors.utils.SquadProcessor"),Kd.forEach(r),hi.forEach(r),ia=d(e),N=a(e,"DIV",{class:!0});var J=o(N);v(kt.$$.fragment,J),zl=d(J),hs=a(J,"P",{});var Yd=o(hs);Ml=l(Yd,`Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.`),Yd.forEach(r),Vl=d(J),Le=a(J,"DIV",{class:!0});var Ga=o(Le);v(It.$$.fragment,Ga),Cl=d(Ga),_s=a(Ga,"P",{});var Zd=o(_s);jl=l(Zd,"Returns the evaluation example from the data directory."),Zd.forEach(r),Ga.forEach(r),Ol=d(J),X=a(J,"DIV",{class:!0});var hr=o(X);v(St.$$.fragment,hr),Ql=d(hr),At=a(hr,"P",{});var Ra=o(At);Ul=l(Ra,"Creates a list of "),gs=a(Ra,"CODE",{});var ep=o(gs);Fl=l(ep,"SquadExample"),ep.forEach(r),Gl=l(Ra," using a TFDS dataset."),Ra.forEach(r),Rl=d(hr),v(De.$$.fragment,hr),hr.forEach(r),Hl=d(J),Te=a(J,"DIV",{class:!0});var Ha=o(Te);v(Lt.$$.fragment,Ha),Wl=d(Ha),vs=a(Ha,"P",{});var tp=o(vs);Xl=l(tp,"Returns the training examples from the data directory."),tp.forEach(r),Ha.forEach(r),J.forEach(r),da=d(e),Ne=a(e,"P",{});var Wa=o(Ne);Bl=l(Wa,`Additionally, the following method can be used to convert SQuAD examples into `),$s=a(Wa,"CODE",{});var rp=o($s);Jl=l(rp,"~data.processors.utils.SquadFeatures"),rp.forEach(r),Kl=l(Wa," that can be used as model inputs."),Wa.forEach(r),pa=d(e),F=a(e,"DIV",{class:!0});var _r=o(F);v(Dt.$$.fragment,_r),Yl=d(_r),bs=a(_r,"P",{});var sp=o(bs);Zl=l(sp,`Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer\u2019s features to create the model\u2019s inputs.`),sp.forEach(r),ei=d(_r),v(ze.$$.fragment,_r),_r.forEach(r),ca=d(e),Me=a(e,"P",{});var Xa=o(Me);ti=l(Xa,`These processors as well as the aforementionned method can be used with files containing the data as well as with the `),xs=a(Xa,"EM",{});var ap=o(xs);ri=l(ap,"tensorflow_datasets"),ap.forEach(r),si=l(Xa," package. 
Examples are given below."),Xa.forEach(r),fa=d(e),le=a(e,"H3",{class:!0});var Ba=o(le);Ve=a(Ba,"A",{id:!0,class:!0,href:!0});var op=o(Ve);Es=a(op,"SPAN",{});var np=o(Es);v(Tt.$$.fragment,np),np.forEach(r),op.forEach(r),ai=d(Ba),ws=a(Ba,"SPAN",{});var lp=o(ws);oi=l(lp,"Example usage"),lp.forEach(r),Ba.forEach(r),ma=d(e),lr=a(e,"P",{});var ip=o(lr);ni=l(ip,"Here is an example using the processors as well as the conversion method using data files:"),ip.forEach(r),ua=d(e),v(Nt.$$.fragment,e),ha=d(e),Ce=a(e,"P",{});var Ja=o(Ce);li=l(Ja,"Using "),ys=a(Ja,"EM",{});var dp=o(ys);ii=l(dp,"tensorflow_datasets"),dp.forEach(r),di=l(Ja," is as easy as using a data file:"),Ja.forEach(r),_a=d(e),v(zt.$$.fragment,e),ga=d(e),je=a(e,"P",{});var Ka=o(je);pi=l(Ka,"Another example using these processors is given in the "),Mt=a(Ka,"A",{href:!0,rel:!0});var pp=o(Mt);ci=l(pp,"run_squad.py"),pp.forEach(r),fi=l(Ka," script."),Ka.forEach(r),this.h()},h(){f(u,"name","hf:doc:metadata"),f(u,"content",JSON.stringify(wp)),f(h,"id","processors"),f(h,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(h,"href","#processors"),f(_,"class","relative group"),f(Ot,"href","../model_doc/wav2vec2"),f(Qt,"href","../model_doc/clip"),f(de,"id","transformers.ProcessorMixin"),f(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(de,"href","#transformers.ProcessorMixin"),f(Z,"class","relative group"),f(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Gt,"href","/docs/transformers/pr_19429/en/model_doc/trocr#transformers.TrOCRProcessor.from_pretrained"),f(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ue,"id","transformers.DataProcessor"),f(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ue,"href","#transformers.DataProcessor"),f(ee,"class","relative group"),f(Rt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.DataProcessor"),f(Ht,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f(Wt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f(Xt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputFeatures"),f(Bt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Jt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),f(Kt,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(we,"id","transformers.glue_convert_examples_to_features"),f(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(we,"href","#transformers.glue_convert_examples_to_features"),f(te,"class","relative group"),f(ft,"href","https://gluebenchmark.com/"),f(ft,"rel","nofollow"),f(mt,"href","https://openreview.net/pdf?id=rJ4km2R5t7"),f(mt,"rel","nofollow"),f(er,"href","/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample"),f(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(qe,"id","xnli"),f(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(qe,"href","#xnli"),f(se,"class","relative group"),f(_t,"href","https://www.nyu.edu/projects/bowman/xnli/"),f(_t,"rel","nofollow"),f(gt,"href","http://www.nyu.edu/projects/bowman/multinli/"),f(gt,"rel","nofollow"),f($t,"href","https://arxiv.org/abs/1809.05053"),f($t,"rel","nofollow"),f(bt,"href","https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py"),f(bt,"rel","nofollow"),f(Ie,"id","squad"),f(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ie,"href","#squad"),f(oe,"class","relative group"),f(Et,"href","https://rajpurkar.github.io/SQuAD-explorer//"),f(Et,"rel","nofollow"),f(wt,"href","https://arxiv.org/abs/1606.05250"),f(wt,"rel","nofollow"),f(yt,"href","https://arxiv.org/abs/1806.03822"),f(yt,"rel","nofollow"),f(Se,"id","transformers.data.processors.squad.SquadProcessor"),f(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Se,"href","#transformers.data.processors.squad.SquadProcessor"),f(ne,"class","relative group"),f(Le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ve,"id","example-usage"),f(Ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ve,"href","#example-usage"),f(le,"class","relative group"),f(Mt,"href","https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py"),f(Mt,"rel","nofollow")},m(e,p){t(document.head,u),c(e,y,p),c(e,_,p),t(_,h),t(h,P),$(m,P,null),t(_,w),t(_,V),t(V,T),c(e,q,p),c(e,C,p),t(C,K),c(e,Ls,p),c(e,ie,p),t(ie,Y),t(Y,Ya),t(Y,Ot),t(Ot,Za),t(Y,eo),t(Y,Qt),t(Qt,to),t(Y,ro),t(ie,so),t(ie,gr),t(gr,ao),c(e,Ds,p),c(e,Z,p),t(Z,de),t(de,vr),$(Qe,vr,null),t(Z,oo),t(Z,$r),t($r,no),c(e,Ts,p),c(e,Ut,p),t(Ut,lo),c(e,Ns,p),c(e,Ft,p),t(Ft,io),c(e,zs,p),c(e,L,p),$(Ue,L,null),t(L,po),t(L,br),t(br,co),t(L,fo),t(L,G),$(Fe,G,null),t(G,mo),t(G,xr),t(xr,uo),t(G,ho),$(pe,G,null),t(L,_o),t(L,R),$(Ge,R,null),t(R,go),t(R,Re),t(Re,vo),t(Re,Er),t(Er,$o),t(Re,bo),t(R,xo),$(ce,R,null),t(L,Eo),t(L,H),$(He,H,null),t(H,wo),t(H,We),t(We,yo),t(We,wr),t(wr,Po),t(We,qo),t(H,ko),$(fe,H,null),t(L,Io),t(L,W),$(Xe,W,null),t(W,So),t(W,Be),t(Be,Ao),t(Be,Gt),t(Gt,Lo),t(Be,Do),t(W,To),$(me,W,null),c(e,Ms,p),c(e,ee,p),t(ee,ue),t(ue,yr),$(Je,yr,null),t(ee,No),t(ee,Pr),t(Pr,zo),c(e,Vs,p),c(e,M,p),t(M,Mo),t(M,Rt),t(Rt,Vo),t(M,Co),t(M,Ht),t(Ht,jo),t(M,Oo),t(M,Wt),t(Wt,Qo),t(M,Uo),t(M,Xt),t(Xt,Fo),t(M,Go),c(e,Cs,p),c(e,S,p),$(Ke,S,null),t(S,Ro),t(S,qr),t(qr,Ho),t(S,Wo),t(S,he),$(Ye,he,null),t(he,Xo),t(he,Ze),t(Ze,Bo),t(Ze,Bt),t(Bt,Jo),t(Ze,Ko),t(S,Yo),t(S,_e),$(et,_e,null),t(_e,Zo),t(_e,kr),t(kr,en),t(S,tn),t(S,ge),$(tt,ge,null),t(ge,rn),t(ge,Ir),t(Ir,sn),t(S,an),t(S,ve),$(rt,ve,null),t(ve,on),t(ve,st),t(st,nn),t(st,Jt),t(Jt,ln),t(st,dn),t(S,pn),t(S,$e),$(at,$e,null),t($e,cn),t($e,ot),t(ot,fn),t(ot,Kt),t(Kt,mn),t(ot,un),t(S,hn),t(S,be),$(nt,be,null),t(be,_n),t(be,Sr),t(Sr,gn),c(e,js,p),c(e,O,p),$(lt,O,null),t(O,vn),t(O,Ar),t(Ar,$n),t(O,bn),t(O,xe),$(it,xe,null),t(xe,xn),t(xe,Lr),t(Lr,En),c(e,Os,p),c(e,Q,p),$(dt,Q,null),t(Q,wn),t(Q,Dr),t(Dr,yn),t(Q,Pn),t(Q,Ee),$(pt,Ee,null),t(Ee,qn),t(Ee,Tr),t(Tr,kn),c(e,Qs,p),c(e,te,p),t(te,we),t(we,Nr),$(ct,Nr,null),t(te,In),t(te,zr),t(zr,Sn),c(e,Us,p),c(e,ye,p),t(ye,ft),t(ft,An),t(ye,Ln),t(ye,mt),t(mt,Dn),c(e,Fs,p),c(e,Yt,p),t(Yt,Tn),c(e,Gs,p),c(e,Zt,p),t(Zt,Nn),c(e,Rs,p),c(e,k,p),t(k,Mr),t(Mr,Vr),t(Vr,zn),t(k,Mn),t(k,Cr),t(Cr,jr),t(jr,Vn),t(k,Cn),t(k,Or),t(Or,Qr),t(Qr,jn),t(k,On),t(k,Ur),t(Ur,Fr),t(Fr,Qn),t(k,Un),t(k,Gr),t(Gr,Rr),t(Rr,Fn),t(k,Gn),t(k,Hr),t(Hr,Wr),t(Wr,Rn),t(k,Hn),t(k,Xr),t(Xr,Br),t(Br,Wn),t(k,Xn),t(k,Jr),t(Jr,Kr),t(Kr,Bn),t(k,Jn),t(k,Yr),t(Yr,Zr),t(Zr,Kn),c(e,Hs,p),c(e,Pe,p),t(Pe,Yn),t(Pe,er),t(er,Zn),t(Pe,el),c(e,Ws,p),c(e,re,p),$(ut,re,null),t(re,tl),t(re,tr),t(tr,rl),t(tr,es),t(es,sl),c(e,Xs,p),c(e,se,p),t(se,qe),t(qe,ts),$(ht,ts,null),t(se,al),t(se,rs),t(rs,ol),c(e,Bs,p),c(e,ae,p),t(ae,_t),t(_t,nl),t(ae,ll),t(ae,gt),t(gt,ss),t(ss,il),t(ae,dl),c(e,Js,p),c(e,vt,p),t(vt,pl),t(vt,$t),t($t,cl),c(e,Ks,p),c(e,rr,p),t(rr,fl),c(e,Ys,p),c(e,sr,p),t(sr,as),t(as,os),t(os,ml),c(e,Zs,p),c(e,ar,p),t(ar,ul),c(e,ea,p),c(e,ke,p),t(ke,hl),t(ke,bt),t(bt,_l),t(ke,gl),c(e,ta,p),c(e,oe,p),t(oe,Ie),t(Ie,ns),$(xt,ns,null),t(oe,vl),t(oe,ls),t(ls,$l),c(e,ra,p),c(e,U,p),t(U,Et),t(Et,bl),t(U,xl),t(U,wt),t(wt,El),t(U,wl),t(U,yt),t(yt,yl),t(U,Pl),c(e,sa,p),c(e,or,p),t(or,ql),c(e,aa,p),c(e,ne,p),t(ne,Se),t(Se,is),$(Pt,is,null),t(ne,kl),t(ne,ds),t(ds,Il),c(e,oa,p),c(e,nr,p),t(nr,Sl),c(e,na,p),c(e,Ae,p),t(Ae,ps),t(ps,cs),t(cs,Al),t(Ae,Ll),t(Ae,fs),t(fs,ms),t(ms,Dl),c(e,la,p),c(e,qt,p),t(qt,Tl),t(qt,us),t(us,Nl),c(e,ia,p),c(e,N,p),$(kt,N,null),t(N,zl),t(N,hs),t(hs,Ml),t(N,Vl),t(N,Le),$(It,Le,null),t(Le,Cl),t(L
e,_s),t(_s,jl),t(N,Ol),t(N,X),$(St,X,null),t(X,Ql),t(X,At),t(At,Ul),t(At,gs),t(gs,Fl),t(At,Gl),t(X,Rl),$(De,X,null),t(N,Hl),t(N,Te),$(Lt,Te,null),t(Te,Wl),t(Te,vs),t(vs,Xl),c(e,da,p),c(e,Ne,p),t(Ne,Bl),t(Ne,$s),t($s,Jl),t(Ne,Kl),c(e,pa,p),c(e,F,p),$(Dt,F,null),t(F,Yl),t(F,bs),t(bs,Zl),t(F,ei),$(ze,F,null),c(e,ca,p),c(e,Me,p),t(Me,ti),t(Me,xs),t(xs,ri),t(Me,si),c(e,fa,p),c(e,le,p),t(le,Ve),t(Ve,Es),$(Tt,Es,null),t(le,ai),t(le,ws),t(ws,oi),c(e,ma,p),c(e,lr,p),t(lr,ni),c(e,ua,p),$(Nt,e,p),c(e,ha,p),c(e,Ce,p),t(Ce,li),t(Ce,ys),t(ys,ii),t(Ce,di),c(e,_a,p),$(zt,e,p),c(e,ga,p),c(e,je,p),t(je,pi),t(je,Mt),t(Mt,ci),t(je,fi),va=!0},p(e,[p]){const Vt={};p&2&&(Vt.$$scope={dirty:p,ctx:e}),pe.$set(Vt);const Ps={};p&2&&(Ps.$$scope={dirty:p,ctx:e}),ce.$set(Ps);const qs={};p&2&&(qs.$$scope={dirty:p,ctx:e}),fe.$set(qs);const ks={};p&2&&(ks.$$scope={dirty:p,ctx:e}),me.$set(ks);const Is={};p&2&&(Is.$$scope={dirty:p,ctx:e}),De.$set(Is);const Ct={};p&2&&(Ct.$$scope={dirty:p,ctx:e}),ze.$set(Ct)},i(e){va||(b(m.$$.fragment,e),b(Qe.$$.fragment,e),b(Ue.$$.fragment,e),b(Fe.$$.fragment,e),b(pe.$$.fragment,e),b(Ge.$$.fragment,e),b(ce.$$.fragment,e),b(He.$$.fragment,e),b(fe.$$.fragment,e),b(Xe.$$.fragment,e),b(me.$$.fragment,e),b(Je.$$.fragment,e),b(Ke.$$.fragment,e),b(Ye.$$.fragment,e),b(et.$$.fragment,e),b(tt.$$.fragment,e),b(rt.$$.fragment,e),b(at.$$.fragment,e),b(nt.$$.fragment,e),b(lt.$$.fragment,e),b(it.$$.fragment,e),b(dt.$$.fragment,e),b(pt.$$.fragment,e),b(ct.$$.fragment,e),b(ut.$$.fragment,e),b(ht.$$.fragment,e),b(xt.$$.fragment,e),b(Pt.$$.fragment,e),b(kt.$$.fragment,e),b(It.$$.fragment,e),b(St.$$.fragment,e),b(De.$$.fragment,e),b(Lt.$$.fragment,e),b(Dt.$$.fragment,e),b(ze.$$.fragment,e),b(Tt.$$.fragment,e),b(Nt.$$.fragment,e),b(zt.$$.fragment,e),va=!0)},o(e){x(m.$$.fragment,e),x(Qe.$$.fragment,e),x(Ue.$$.fragment,e),x(Fe.$$.fragment,e),x(pe.$$.fragment,e),x(Ge.$$.fragment,e),x(ce.$$.fragment,e),x(He.$$.fragment,e),x(fe.$$.fragment,e),x(Xe.$$.fragment,e),x(me.$$.fragment,e),x(Je.$$.fragment,e),x(Ke.$$.fragment,e),x(Ye.$$.fragment,e),x(et.$$.fragment,e),x(tt.$$.fragment,e),x(rt.$$.fragment,e),x(at.$$.fragment,e),x(nt.$$.fragment,e),x(lt.$$.fragment,e),x(it.$$.fragment,e),x(dt.$$.fragment,e),x(pt.$$.fragment,e),x(ct.$$.fragment,e),x(ut.$$.fragment,e),x(ht.$$.fragment,e),x(xt.$$.fragment,e),x(Pt.$$.fragment,e),x(kt.$$.fragment,e),x(It.$$.fragment,e),x(St.$$.fragment,e),x(De.$$.fragment,e),x(Lt.$$.fragment,e),x(Dt.$$.fragment,e),x(ze.$$.fragment,e),x(Tt.$$.fragment,e),x(Nt.$$.fragment,e),x(zt.$$.fragment,e),va=!1},d(e){r(u),e&&r(y),e&&r(_),E(m),e&&r(q),e&&r(C),e&&r(Ls),e&&r(ie),e&&r(Ds),e&&r(Z),E(Qe),e&&r(Ts),e&&r(Ut),e&&r(Ns),e&&r(Ft),e&&r(zs),e&&r(L),E(Ue),E(Fe),E(pe),E(Ge),E(ce),E(He),E(fe),E(Xe),E(me),e&&r(Ms),e&&r(ee),E(Je),e&&r(Vs),e&&r(M),e&&r(Cs),e&&r(S),E(Ke),E(Ye),E(et),E(tt),E(rt),E(at),E(nt),e&&r(js),e&&r(O),E(lt),E(it),e&&r(Os),e&&r(Q),E(dt),E(pt),e&&r(Qs),e&&r(te),E(ct),e&&r(Us),e&&r(ye),e&&r(Fs),e&&r(Yt),e&&r(Gs),e&&r(Zt),e&&r(Rs),e&&r(k),e&&r(Hs),e&&r(Pe),e&&r(Ws),e&&r(re),E(ut),e&&r(Xs),e&&r(se),E(ht),e&&r(Bs),e&&r(ae),e&&r(Js),e&&r(vt),e&&r(Ks),e&&r(rr),e&&r(Ys),e&&r(sr),e&&r(Zs),e&&r(ar),e&&r(ea),e&&r(ke),e&&r(ta),e&&r(oe),E(xt),e&&r(ra),e&&r(U),e&&r(sa),e&&r(or),e&&r(aa),e&&r(ne),E(Pt),e&&r(oa),e&&r(nr),e&&r(na),e&&r(Ae),e&&r(la),e&&r(qt),e&&r(ia),e&&r(N),E(kt),E(It),E(St),E(De),E(Lt),e&&r(da),e&&r(Ne),e&&r(pa),e&&r(F),E(Dt),E(ze),e&&r(ca),e&&r(Me),e&&r(fa),e&&r(le),E(Tt),e&&r(ma),e&&r(lr),e&&r(ua),E(Nt,e),e&&r(ha),e&&r(Ce),e&&r(_a),E(zt,e),e&&r(ga),e&&r(je)}}}const 
wp={local:"processors",sections:[{local:"transformers.ProcessorMixin",title:"Multi-modal processors"},{local:"transformers.DataProcessor",title:"Deprecated processors"},{local:"transformers.glue_convert_examples_to_features",title:"GLUE"},{local:"xnli",title:"XNLI"},{local:"squad",sections:[{local:"transformers.data.processors.squad.SquadProcessor",title:"Processors"},{local:"example-usage",title:"Example usage"}],title:"SQuAD"}],title:"Processors"};function yp(z){return hp(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Lp extends cp{constructor(u){super();fp(this,u,yp,Ep,mp,{})}}export{Lp as default,wp as metadata};
22
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/deepspeed.mdx-hf-doc-builder.js
import{S as kY,i as PY,s as zY,e as o,k as u,w as f,t as a,M as DY,c as l,d as t,m as c,a as r,x as d,h as n,b as h,G as s,g as i,y as m,q as _,o as v,B as j,v as OY}from"../../chunks/vendor-hf-doc-builder.js";import{T as yU}from"../../chunks/Tip-hf-doc-builder.js";import{D as AY}from"../../chunks/Docstring-hf-doc-builder.js";import{C as w}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as y}from"../../chunks/IconCopyLink-hf-doc-builder.js";function TY(jt){let g,S;return{c(){g=o("p"),S=a("This section is a must-read")},l(b){g=l(b,"P",{});var k=r(g);S=n(k,"This section is a must-read"),k.forEach(t)},m(b,k){i(b,g,k),s(g,S)},d(b){b&&t(g)}}}function SY(jt){let g,S,b,k,X,z,C,Q,x,te,T;return{c(){g=o("p"),S=a("As of "),b=o("code"),k=a("deepspeed==0.6.0"),X=a(" the bf16 support is new and experimental."),z=u(),C=o("p"),Q=a("If you use "),x=o("a"),te=a("gradient accumulation"),T=a(" with bf16-enabled, you need to be aware that it\u2019ll accumulate gradients in bf16, which may not be what you want due to this format\u2019s low precision, as it may lead to a lossy accumulation."),this.h()},l(q){g=l(q,"P",{});var E=r(g);S=n(E,"As of "),b=l(E,"CODE",{});var gs=r(b);k=n(gs,"deepspeed==0.6.0"),gs.forEach(t),X=n(E," the bf16 support is new and experimental."),E.forEach(t),z=c(q),C=l(q,"P",{});var W=r(C);Q=n(W,"If you use "),x=l(W,"A",{href:!0});var bs=r(x);te=n(bs,"gradient accumulation"),bs.forEach(t),T=n(W," with bf16-enabled, you need to be aware that it\u2019ll accumulate gradients in bf16, which may not be what you want due to this format\u2019s low precision, as it may lead to a lossy accumulation."),W.forEach(t),this.h()},h(){h(x,"href","#gradient-accumulation")},m(q,E){i(q,g,E),s(g,S),s(g,b),s(b,k),s(g,X),i(q,z,E),i(q,C,E),s(C,Q),s(C,x),s(x,te),s(C,T)},d(q){q&&t(g),q&&t(z),q&&t(C)}}}function CY(jt){let g,S,b,k,X,z,C,Q,x,te,T;return{c(){g=o("p"),S=a("Note, that once "),b=o("code"),k=a("load_state_dict_from_zero_checkpoint"),X=a(" was run, the "),z=o("code"),C=a("model"),Q=a(` will no longer be useable in the DeepSpeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since `),x=o("code"),te=a("model.load_state_dict(state_dict)"),T=a(` will remove all the DeepSpeed magic from it. So do this only at the very end of the training.`)},l(q){g=l(q,"P",{});var E=r(g);S=n(E,"Note, that once "),b=l(E,"CODE",{});var gs=r(b);k=n(gs,"load_state_dict_from_zero_checkpoint"),gs.forEach(t),X=n(E," was run, the "),z=l(E,"CODE",{});var W=r(z);C=n(W,"model"),W.forEach(t),Q=n(E,` will no longer be useable in the DeepSpeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since `),x=l(E,"CODE",{});var bs=r(x);te=n(bs,"model.load_state_dict(state_dict)"),bs.forEach(t),T=n(E,` will remove all the DeepSpeed magic from it. 
So do this only at the very end of the training.`),E.forEach(t)},m(q,E){i(q,g,E),s(g,S),s(g,b),s(b,k),s(g,X),s(g,z),s(z,C),s(g,Q),s(g,x),s(x,te),s(g,T)},d(q){q&&t(g)}}}function xY(jt){let g,S,b,k,X,z,C,Q,x,te,T,q,E,gs,W,bs,i4,wj,R,Gh,u4,c4,Mh,h4,f4,Lh,d4,m4,Zh,_4,v4,Nh,j4,w4,Hh,y4,yj,Te,g4,Kn,b4,q4,Jn,E4,$4,gj,kp,k4,bj,Pp,P4,qj,wt,z4,Xn,D4,O4,Ej,yt,Qn,A4,zp,T4,S4,C4,F,x4,Dp,R4,I4,Bh,U4,G4,Wh,M4,L4,Fh,Z4,N4,Op,H4,B4,$j,Ap,W4,kj,Tp,F4,Pj,Sp,Vh,V4,zj,Cp,Y4,Dj,xp,eo,K4,Rp,J4,X4,Oj,Ip,Q4,Aj,Up,Tj,qs,gt,Yh,so,e6,Kh,s6,Sj,Gp,Cj,Es,bt,Jh,to,t6,Xh,a6,xj,Mp,n6,Rj,ao,Ij,Se,o6,Qh,l6,r6,ef,p6,i6,Uj,no,Gj,Ce,u6,oo,c6,h6,lo,f6,d6,Mj,qt,m6,Lp,_6,v6,Lj,Zp,j6,Zj,Np,w6,Nj,ro,Hj,xe,y6,sf,g6,b6,tf,q6,E6,Bj,Et,$6,af,k6,P6,Wj,po,Fj,ke,z6,nf,D6,O6,of,A6,T6,lf,S6,Vj,Hp,C6,Yj,io,Kj,Re,x6,rf,R6,I6,pf,U6,G6,Jj,$t,M6,uf,L6,Z6,Xj,Ie,N6,cf,H6,B6,uo,W6,F6,Qj,Bp,V6,ew,co,sw,Wp,Y6,tw,ho,aw,Fp,K6,nw,fo,ow,kt,J6,hf,X6,Q6,lw,Pt,e$,ff,s$,t$,rw,zt,a$,mo,n$,o$,pw,Vp,iw,$s,Dt,df,_o,l$,mf,r$,uw,Ot,p$,Yp,i$,u$,cw,At,ks,c$,_f,h$,f$,vf,d$,m$,_$,Pe,v$,jf,j$,w$,wf,y$,g$,vo,b$,q$,hw,Kp,E$,fw,jo,dw,Jp,$$,mw,wo,_w,I,k$,yf,P$,z$,gf,D$,O$,bf,A$,T$,qf,S$,C$,yo,x$,R$,vw,ae,I$,Ef,U$,G$,$f,M$,L$,kf,Z$,N$,jw,Tt,H$,Pf,B$,W$,ww,go,yw,St,F$,zf,V$,Y$,gw,Ct,K$,bo,J$,X$,bw,Xp,qw,Ps,xt,Df,qo,Q$,Of,e5,Ew,Rt,s5,Qp,t5,a5,$w,Eo,kw,Ue,n5,Af,o5,l5,$o,r5,p5,Pw,ei,i5,zw,It,Tf,u5,c5,Sf,h5,Dw,si,f5,Ow,ko,Aw,ti,d5,Tw,Ut,m5,Po,_5,v5,Sw,ai,j5,Cw,ni,w5,xw,oi,zs,zo,y5,Cf,g5,b5,q5,Do,E5,xf,$5,Rw,li,Iw,Ds,Gt,Rf,Oo,k5,If,P5,Uw,Mt,z5,Uf,D5,O5,Gw,ri,A5,Mw,Ao,Lw,Lt,T5,Gf,S5,C5,Zw,pi,x5,Nw,ii,R5,Hw,To,Bw,Ge,I5,Mf,U5,G5,Lf,M5,L5,Ww,So,Fw,Zt,Z5,Zf,N5,H5,Vw,Co,Yw,ui,B5,Kw,Nt,W5,Nf,F5,V5,Jw,ci,Xw,Os,Ht,Hf,xo,Y5,Bf,K5,Qw,Bt,J5,Ro,X5,Q5,ey,Wt,e9,Io,s9,t9,sy,Uo,ty,Ft,a9,Wf,n9,o9,ay,Go,ny,Vt,l9,Mo,r9,p9,oy,hi,i9,ly,ne,u9,Ff,c9,h9,Vf,f9,d9,Yf,m9,_9,ry,Lo,py,Yt,v9,fi,j9,w9,iy,di,uy,As,Kt,Kf,Zo,y9,Jf,g9,cy,U,b9,mi,q9,E9,_i,$9,k9,Xf,P9,z9,Qf,D9,O9,vi,A9,T9,hy,ji,S9,fy,No,dy,wi,C9,my,Ho,_y,yi,vy,Ts,Jt,ed,Bo,x9,sd,R9,jy,Xt,wy,Me,I9,gi,U9,G9,bi,M9,L9,yy,Qt,Z9,qi,N9,H9,gy,Le,B9,td,W9,F9,Ei,V9,Y9,by,$i,K9,qy,ea,J9,ki,X9,Q9,Ey,sa,ad,e8,s8,Wo,t8,Pi,a8,n8,$y,oe,o8,nd,l8,r8,zi,p8,i8,od,u8,c8,ky,Di,Py,Ss,ta,ld,Fo,h8,rd,f8,zy,Vo,Yo,d8,m8,Dy,Ze,_8,pd,v8,j8,Ko,w8,y8,Oy,aa,g8,Oi,b8,q8,Ay,Ai,E8,Ty,Ti,Sy,Cs,na,id,Jo,$8,ud,k8,Cy,Si,P8,xy,Xo,Ry,Ci,cd,z8,Iy,Ne,xs,D8,hd,O8,A8,fd,T8,S8,C8,G,dd,x8,R8,md,I8,U8,_d,G8,M8,vd,L8,Z8,jd,N8,H8,wd,B8,W8,F8,yd,V8,Uy,He,Y8,gd,K8,J8,bd,X8,Q8,Gy,Qo,My,xi,ek,Ly,Ri,Zy,Rs,oa,qd,el,sk,Ed,tk,Ny,Ii,ak,Hy,sl,By,V,nk,$d,ok,lk,kd,rk,pk,Pd,ik,uk,zd,ck,hk,Wy,Be,fk,Dd,dk,mk,Od,_k,vk,Fy,Ui,Ad,jk,Vy,la,tl,Td,wk,yk,Sd,gk,bk,al,Cd,qk,Ek,xd,$k,Yy,M,kk,Rd,Pk,zk,Id,Dk,Ok,Ud,Ak,Tk,Gd,Sk,Ck,Md,xk,Rk,Ky,ze,Ld,Ik,Uk,Zd,Gk,Mk,Nd,Lk,Zk,Jy,Gi,Nk,Xy,We,nl,Hd,Hk,Bk,Bd,Wk,Fk,ol,Wd,Vk,Yk,Fd,Kk,Jk,ll,Vd,Xk,Qk,Yd,e7,Qy,Fe,s7,Kd,t7,a7,Mi,n7,o7,eg,rl,Jd,l7,r7,sg,le,p7,Xd,i7,u7,Qd,c7,h7,em,f7,d7,tg,Li,pl,sm,m7,_7,tm,v7,ag,De,am,j7,w7,nm,y7,g7,om,b7,q7,ng,Ve,E7,lm,$7,k7,rm,P7,z7,og,ra,il,D7,pm,O7,A7,T7,ul,S7,im,C7,x7,lg,Zi,rg,Is,pa,um,cl,R7,cm,I7,pg,Ni,U7,ig,Hi,G7,ug,hl,cg,ia,M7,hm,L7,Z7,hg,Ye,N7,fl,H7,B7,dl,W7,F7,fg,ua,V7,fm,Y7,K7,dg,Ke,J7,dm,X7,Q7,ml,eP,sP,mg,Bi,_g,Us,ca,mm,_l,tP,_m,aP,vg,Wi,nP,jg,Fi,oP,wg,ha,Gs,lP,vm,rP,pP,jm,iP,uP,cP,vl,hP,wm,fP,dP,yg,Je,mP,ym,_P,vP,gm,jP,wP,gg,Vi,bg,Ms,fa,bm,jl,yP,qm,gP,qg,da,bP,Em,qP,EP,Eg,wl,$g,ma,$P,$m,kP,PP,kg,yl,Pg,Yi,zg,Ls,_a,km,gl,zP,Pm,DP,Dg,va,OP,zm,AP,TP,Og,bl,Ag,ja,SP,Dm,CP,xP,Tg,ql,Sg,Zs,wa,Om,El,RP,Am,IP,Cg,ya,UP,Tm,GP,MP,xg,Ki,LP,Rg,ga,ZP,Sm,NP,HP,Ig,Ji,Ug,Ns,ba,Cm,$l,BP,xm,WP,Gg,Xe,FP,Rm,VP,YP,kl,KP,JP,Mg,$,XP,Im,QP,ez,Xi,sz,tz,Um,az,nz,Gm,oz,lz,Mm,rz,p
z,Lm,iz,uz,Zm,cz,hz,Nm,fz,dz,Lg,Qe,mz,Hm,_z,vz,Bm,jz,wz,Zg,Pl,Ng,Qi,yz,Hg,re,zl,Wm,gz,bz,Fm,qz,Ez,Dl,Vm,$z,kz,Ym,Pz,zz,Ol,Km,Dz,Oz,Jm,Az,Tz,Al,Xm,Sz,Cz,Qm,xz,Bg,eu,Rz,Wg,su,Iz,Fg,Tl,Vg,qa,Uz,tu,Gz,Mz,Yg,au,Lz,Kg,Sl,Jg,pe,Zz,e_,Nz,Hz,s_,Bz,Wz,t_,Fz,Vz,Xg,nu,Qg,Hs,Ea,a_,Cl,Yz,n_,Kz,e2,L,Jz,o_,Xz,Qz,l_,eD,sD,r_,tD,aD,p_,nD,oD,xl,lD,rD,s2,ou,pD,t2,$a,Rl,i_,iD,uD,u_,cD,hD,es,c_,fD,dD,h_,mD,_D,f_,vD,jD,a2,D,wD,d_,yD,gD,lu,bD,qD,m_,ED,$D,__,kD,PD,v_,zD,DD,j_,OD,AD,n2,ss,TD,w_,SD,CD,y_,xD,RD,o2,Il,l2,ts,ID,g_,UD,GD,ru,MD,LD,r2,ie,ka,b_,ZD,ND,q_,HD,BD,WD,Pa,E_,FD,VD,$_,YD,KD,JD,as,k_,XD,QD,P_,eO,sO,z_,tO,aO,nO,ns,D_,oO,lO,O_,rO,pO,A_,iO,uO,p2,pu,cO,i2,Ul,u2,za,hO,iu,fO,dO,c2,Da,mO,T_,_O,vO,h2,Gl,f2,Y,jO,S_,wO,yO,C_,gO,bO,x_,qO,EO,R_,$O,kO,d2,uu,m2,Bs,Oa,I_,Ml,PO,U_,zO,_2,cu,DO,v2,Aa,OO,G_,AO,TO,j2,Ll,w2,Ta,SO,Zl,CO,xO,y2,ue,RO,M_,IO,UO,L_,GO,MO,Z_,LO,ZO,g2,hu,b2,Ws,Sa,N_,Nl,NO,H_,HO,q2,fu,BO,E2,Fs,Ca,B_,Hl,WO,W_,FO,$2,du,VO,k2,Bl,P2,os,YO,mu,KO,JO,F_,XO,QO,z2,ls,eA,V_,sA,tA,Y_,aA,nA,D2,_u,oA,O2,Wl,A2,xa,lA,vu,rA,pA,T2,Ra,iA,Fl,uA,cA,S2,Vs,Ia,K_,Vl,hA,J_,fA,C2,ju,dA,x2,Yl,R2,wu,mA,I2,rs,_A,X_,vA,jA,Q_,wA,yA,U2,yu,gA,G2,Kl,M2,Ua,L2,Ys,Ga,e1,Jl,bA,s1,qA,Z2,gu,EA,N2,Xl,H2,ce,$A,bu,kA,PA,t1,zA,DA,a1,OA,AA,B2,Ma,TA,n1,SA,CA,W2,qu,xA,F2,Ql,V2,La,RA,Eu,IA,UA,Y2,Za,GA,er,MA,LA,K2,$u,J2,Ks,Na,o1,sr,ZA,l1,NA,X2,ku,HA,Q2,tr,eb,Z,BA,Pu,WA,FA,r1,VA,YA,p1,KA,JA,i1,XA,QA,u1,eT,sT,sb,zu,tT,tb,ar,ab,Ha,aT,Du,nT,oT,nb,Ou,ob,Js,Ba,c1,nr,lT,h1,rT,lb,Au,pT,rb,or,pb,ps,iT,Tu,uT,cT,f1,hT,fT,ib,Su,dT,ub,lr,cb,Wa,mT,Cu,_T,vT,hb,xu,fb,Xs,Fa,d1,rr,jT,m1,wT,db,Ru,yT,mb,pr,_b,is,gT,Iu,bT,qT,_1,ET,$T,vb,Uu,kT,jb,ir,wb,Va,PT,Gu,zT,DT,yb,Mu,gb,Qs,Ya,v1,ur,OT,j1,AT,bb,Ka,TT,w1,ST,CT,qb,Lu,y1,xT,Eb,Ja,RT,g1,IT,UT,$b,O,GT,b1,MT,LT,q1,ZT,NT,E1,HT,BT,$1,WT,FT,k1,VT,YT,P1,KT,JT,kb,cr,Pb,Zu,z1,XT,zb,Xa,QT,hr,eS,sS,Db,Nu,D1,tS,Ob,Hu,aS,Ab,Bu,nS,Tb,fr,Sb,us,oS,O1,lS,rS,A1,pS,iS,Cb,dr,xb,Qa,Rb,en,uS,T1,cS,hS,Ib,sn,fS,S1,dS,mS,Ub,mr,Gb,Wu,C1,_S,Mb,cs,vS,x1,jS,wS,R1,yS,gS,Lb,Fu,bS,Zb,_r,Nb,tn,qS,I1,ES,$S,Hb,vr,Bb,an,kS,U1,PS,zS,Wb,Vu,DS,Fb,jr,G1,OS,AS,Vb,hs,TS,M1,SS,CS,L1,xS,RS,Yb,Yu,IS,Kb,et,nn,Z1,wr,US,N1,GS,Jb,Ku,MS,Xb,Ju,LS,Qb,Xu,ZS,e3,st,on,H1,yr,NS,B1,HS,s3,ln,BS,W1,WS,FS,t3,gr,a3,Qu,VS,n3,P,YS,F1,KS,JS,V1,XS,QS,Y1,eC,sC,ec,tC,aC,sc,nC,oC,K1,lC,rC,J1,pC,iC,o3,br,l3,rn,uC,X1,cC,hC,r3,tc,fC,p3,pn,dC,qr,mC,_C,i3,he,vC,Q1,jC,wC,ev,yC,gC,ac,bC,qC,u3,tt,un,sv,Er,EC,tv,$C,c3,$r,kC,kr,PC,h3,cn,zC,av,DC,OC,f3,nc,AC,d3,Pr,m3,fs,TC,nv,SC,CC,ov,xC,RC,_3,oc,v3,at,hn,lv,zr,IC,rv,UC,j3,lc,GC,w3,fn,MC,rc,LC,ZC,y3,Dr,g3,pc,NC,b3,dn,HC,pv,BC,WC,q3,Or,E3,ic,FC,$3,uc,VC,k3,nt,mn,iv,Ar,YC,uv,KC,P3,cc,JC,z3,hc,XC,D3,Tr,O3,fc,QC,A3,dc,ex,T3,mc,sx,S3,_c,tx,C3,Sr,x3,vc,ax,R3,_n,nx,Cr,ox,lx,I3,ot,vn,cv,xr,rx,hv,px,U3,jc,ix,G3,wc,ux,M3,N,fv,dv,cx,hx,mv,Oe,fx,yc,dx,mx,gc,_x,vx,bc,jx,wx,yx,Rr,_v,gx,bx,Ir,qx,vv,Ur,Ex,Gr,$x,kx,Px,jv,wv,zx,Dx,yv,Mr,Ox,Lr,Ax,Tx,L3,qc,Sx,Z3,jn,lt,gv,Cx,xx,bv,Rx,Ix,qv,Ux,Gx,Ev,Zr,Mx,Nr,Lx,Zx,N3,rt,wn,$v,Hr,Nx,kv,Hx,H3,pt,yn,Pv,Br,Bx,Wr,Wx,zv,Fx,Vx,B3,H,Yx,Dv,Kx,Jx,Ov,Xx,Qx,Av,eR,sR,Tv,tR,aR,Fr,nR,oR,W3,it,gn,Sv,Vr,lR,Ec,rR,Cv,pR,F3,$c,iR,V3,kc,uR,Y3,Yr,K3,bn,cR,xv,hR,fR,J3,Kr,X3,Pc,dR,Q3,zc,mR,e0,ds,_R,Rv,vR,jR,Iv,wR,yR,s0,ut,qn,Uv,Jr,gR,Gv,bR,t0,ms,ct,qR,Dc,ER,$R,Mv,kR,PR,zR,Xr,DR,Qr,OR,AR,TR,ht,SR,Oc,CR,xR,ep,RR,IR,a0,ft,En,Lv,sp,UR,Zv,GR,n0,fe,MR,Ac,LR,ZR,Tc,NR,HR,Nv,BR,WR,o0,$n,FR,Sc,VR,YR,l0,_s,KR,Cc,JR,XR,xc,QR,eI,r0,kn,sI,Hv,tI,aI,p0,Rc,nI,i0,tp,u0,Ic,oI,c0,ap,h0,de,lI,Uc,rI,pI,np,iI,uI,Bv,cI,hI,f0,dt,Pn,Wv,op,fI,Fv,dI,d0,ee,lp,mI,Vv,_I,vI,Ae,jI,Yv,wI,yI,Kv,gI,bI,Jv,qI,EI,$I,me,Gc,kI,PI,Xv,zI,DI,Mc,OI,AI,Qv,TI,SI,m0,mt,zn,ej,rp,CI,
sj,xI,_0,Dn,RI,Lc,II,UI,v0,Zc,GI,j0,Nc,MI,w0,Hc,LI,y0,On,tj,ZI,NI,aj,HI,g0,pp,b0,An,BI,nj,WI,FI,q0,ip,E0,Bc,VI,$0,_t,Tn,oj,up,YI,lj,KI,k0,_e,rj,cp,JI,XI,pj,hp,QI,eU,ij,fp,sU,tU,uj,dp,aU,P0,Wc,nU,z0,vs,cj,mp,oU,lU,hj,_p,rU,pU,fj,vp,iU,D0,js,uU,Fc,cU,hU,jp,fU,dU,O0;return z=new y({}),so=new y({}),to=new y({}),ao=new w({props:{code:"pip install deepspeed",highlighted:"pip install deepspeed"}}),no=new w({props:{code:"pip install transformers[deepspeed]",highlighted:"pip install transformers[deepspeed]"}}),ro=new w({props:{code:`git clone https://github.com/microsoft/DeepSpeed/ cd DeepSpeed rm -rf build TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . \\ --global-option="build_ext" --global-option="-j8" --no-cache -v \\ --disable-pip-version-check 2>&1 | tee build.log`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> -rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . \\ --global-option=<span class="hljs-string">&quot;build_ext&quot;</span> --global-option=<span class="hljs-string">&quot;-j8&quot;</span> --no-cache -v \\ --disable-pip-version-check 2&gt;&amp;1 | <span class="hljs-built_in">tee</span> build.log`}}),po=new w({props:{code:'CUDA_VISIBLE_DEVICES=0 python -c "import torch; print(torch.cuda.get_device_capability())"',highlighted:'CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_device_capability())&quot;</span>'}}),io=new w({props:{code:`git clone https://github.com/microsoft/DeepSpeed/ cd DeepSpeed rm -rf build TORCH_CUDA_ARCH_LIST="8.6" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \\ python setup.py build_ext -j8 bdist_wheel`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> -rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \\ python setup.py build_ext -j8 bdist_wheel`}}),co=new w({props:{code:'python -c "import torch; print(torch.cuda.get_arch_list())"',highlighted:'python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_arch_list())&quot;</span>'}}),ho=new w({props:{code:`CUDA_VISIBLE_DEVICES=0 python -c "import torch; \\ print(torch.cuda.get_device_properties(torch.device('cuda')))"`,highlighted:`CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; \\ print(torch.cuda.get_device_properties(torch.device(&#x27;cuda&#x27;)))&quot;</span>`}}),fo=new w({props:{code:"_CudaDeviceProperties(name='GeForce RTX 3090', major=8, minor=6, total_memory=24268MB, multi_processor_count=82)",highlighted:'_CudaDeviceProperties(name=<span class="hljs-string">&#x27;GeForce RTX 3090&#x27;</span>, major=8, minor=6, total_memory=24268MB, multi_processor_count=82)'}}),_o=new y({}),jo=new w({props:{code:"python -m torch.distributed.launch --nproc_per_node=2 your_program.py <normal cl args>",highlighted:"python -m torch.distributed.launch --nproc_per_node=2 your_program.py &lt;normal cl args&gt;"}}),wo=new w({props:{code:"deepspeed --num_gpus=2 your_program.py <normal cl args> --deepspeed ds_config.json",highlighted:"deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --deepspeed ds_config.json"}}),go=new w({props:{code:`deepspeed 
examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero3.json \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir --fp16 \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro`,highlighted:`deepspeed examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero3.json \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir --fp16 \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro`}}),qo=new y({}),Eo=new w({props:{code:`deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero2.json \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir --fp16 \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro`,highlighted:`deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero2.json \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir --fp16 \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro`}}),ko=new w({props:{code:`{ "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "reduce_scatter": true, "reduce_bucket_size": 2e8, "overlap_comm": true, "contiguous_gradients": true } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Do=new w({props:{code:"deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ...",highlighted:"deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ..."}}),Oo=new y({}),Ao=new w({props:{code:`# DeepSpeed requires a distributed environment even when only one process is used. # This emulates a launcher in the notebook import os os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "9994" # modify if RuntimeError: Address already in use os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" os.environ["WORLD_SIZE"] = "1" # Now proceed as normal, plus pass the deepspeed config file training_args = TrainingArguments(..., deepspeed="ds_config_zero3.json") trainer = Trainer(...) trainer.train()`,highlighted:`<span class="hljs-comment"># DeepSpeed requires a distributed environment even when only one process is used.</span> <span class="hljs-comment"># This emulates a launcher in the notebook</span> <span class="hljs-keyword">import</span> os os.environ[<span class="hljs-string">&quot;MASTER_ADDR&quot;</span>] = <span class="hljs-string">&quot;localhost&quot;</span> os.environ[<span class="hljs-string">&quot;MASTER_PORT&quot;</span>] = <span class="hljs-string">&quot;9994&quot;</span> <span class="hljs-comment"># modify if RuntimeError: Address already in use</span> os.environ[<span class="hljs-string">&quot;RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>] = <span class="hljs-string">&quot;1&quot;</span> <span class="hljs-comment"># Now proceed as normal, plus pass the deepspeed config file</span> training_args = TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;ds_config_zero3.json&quot;</span>) trainer = Trainer(...) 
trainer.train()`}}),To=new w({props:{code:`%%bash cat <<'EOT' > ds_config_zero3.json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } EOT`,highlighted:`%%bash cat &lt;&lt;<span class="hljs-string">&#x27;EOT&#x27;</span> &gt; ds_config_zero3.json { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;loss_scale&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;loss_scale_window&quot;</span>: <span class="hljs-number">1000</span>, <span class="hljs-string">&quot;initial_scale_power&quot;</span>: <span class="hljs-number">16</span>, <span class="hljs-string">&quot;hysteresis&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;min_loss_scale&quot;</span>: <span class="hljs-number">1</span> }, <span class="hljs-string">&quot;optimizer&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;AdamW&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;betas&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;eps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;weight_decay&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;scheduler&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;WarmupLR&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;warmup_min_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_max_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_num_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_optimizer&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span 
class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: true, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: true, <span class="hljs-string">&quot;sub_group_size&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_max_live_parameters&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_max_reuse_distance&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span>: true }, <span class="hljs-string">&quot;gradient_accumulation_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;gradient_clipping&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: false } EOT`}}),So=new w({props:{code:`!git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ...`,highlighted:`!git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ...`}}),Co=new w({props:{code:`%%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ...`,highlighted:`%%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ...`}}),xo=new y({}),Uo=new w({props:{code:`git clone https://github.com/microsoft/DeepSpeedExamples cd DeepSpeedExamples find . -name '*json'`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeedExamples <span class="hljs-built_in">cd</span> DeepSpeedExamples find . -name <span class="hljs-string">&#x27;*json&#x27;</span>`}}),Go=new w({props:{code:"grep -i Lamb $(find . -name '*json')",highlighted:'grep -i Lamb $(find . 
-name <span class="hljs-string">&#x27;*json&#x27;</span>)'}}),Lo=new w({props:{code:`{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span>`}}),Zo=new y({}),No=new w({props:{code:'TrainingArguments(..., deepspeed="/path/to/ds_config.json")',highlighted:'TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;/path/to/ds_config.json&quot;</span>)'}}),Ho=new w({props:{code:`ds_config_dict = dict(scheduler=scheduler_params, optimizer=optimizer_params) TrainingArguments(..., deepspeed=ds_config_dict)`,highlighted:`ds_config_dict = <span class="hljs-built_in">dict</span>(scheduler=scheduler_params, optimizer=optimizer_params) TrainingArguments(..., deepspeed=ds_config_dict)`}}),Bo=new y({}),Xt=new yU({props:{warning:!0,$$slots:{default:[TY]},$$scope:{ctx:jt}}}),Fo=new y({}),Jo=new y({}),Xo=new w({props:{code:`{ "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 5e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 5e8, "contiguous_gradients": true } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Qo=new w({props:{code:`{ "zero_optimization": { "round_robin_gradients": true } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;round_robin_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span 
class="hljs-punctuation">}</span>`}}),el=new y({}),sl=new w({props:{code:`{ "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),cl=new y({}),hl=new w({props:{code:`{ "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "nvme", "nvme_path": "/local_nvme", "pin_memory": true, "buffer_count": 4, "fast_init": false }, "offload_param": { "device": "nvme", "nvme_path": "/local_nvme", "pin_memory": true, "buffer_count": 5, "buffer_size": 1e8, "max_in_cpu": 1e9 }, "aio": { "block_size": 262144, "queue_depth": 32, "thread_count": 1, "single_submit": false, "overlap_events": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">4</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;fast_init&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;max_in_cpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span> <span class="hljs-punctuation">}</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;aio&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;block_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">262144</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;queue_depth&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">32</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;thread_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;single_submit&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_events&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span>`}}),_l=new y({}),jl=new y({}),wl=new w({props:{code:`{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, 
"gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span>`}}),yl=new w({props:{code:`{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": 
3e-5, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 500 } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "steps_per_print": 2000, "wall_clock_breakdown": false }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span 
class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">500</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span>`}}),gl=new y({}),bl=new w({props:{code:`{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, 
"reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span>`}}),ql=new w({props:{code:`{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": 3e-5, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 500 } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": 1e6, "stage3_prefetch_bucket_size": 0.94e6, "stage3_param_persistence_threshold": 1e4, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "steps_per_print": 2000, "wall_clock_breakdown": false }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">500</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e6</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.94e6</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e4</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span>`}}),El=new y({}),$l=new y({}),Pl=new w({props:{code:`{ "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Tl=new w({props:{code:`{ "optimizer": { 
"type": "AdamW", "params": { "lr": 0.001, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.001</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Sl=new w({props:{code:`{ "zero_allow_untested_optimizer": true }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_allow_untested_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span>`}}),Cl=new y({}),Il=new w({props:{code:`{ "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Ul=new w({props:{code:`{ "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.001</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Gl=new w({props:{code:`{ "scheduler": { "type": "WarmupDecayLR", "params": { "last_batch_iteration": -1, "total_num_steps": "auto", "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupDecayLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;last_batch_iteration&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">-1</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;total_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Ml=new y({}),Ll=new w({props:{code:`{ "fp16": { "enabled": "false", } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;false&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Nl=new y({}),Hl=new y({}),Bl=new w({props:{code:`{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Wl=new w({props:{code:`{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Vl=new y({}),Yl=new w({props:{code:`{ "bf16": { "enabled": "auto" } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;bf16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Kl=new w({props:{code:`{ "bf16": { "enabled": true } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;bf16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Ua=new yU({props:{$$slots:{default:[SY]},$$scope:{ctx:jt}}}),Jl=new y({}),Xl=new w({props:{code:`"amp": { "enabled": "auto", "opt_level": "auto" }`,highlighted:`<span class="hljs-attr">&quot;amp&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;opt_level&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span>`}}),Ql=new w({props:{code:`{ "amp": { "enabled": true, "opt_level": "O1" } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;amp&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;opt_level&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;O1&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),sr=new y({}),tr=new w({props:{code:`{ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto" }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span>`}}),ar=new w({props:{code:`{ "train_batch_size": 12, "train_micro_batch_size_per_gpu": 4 }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">12</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">4</span> <span class="hljs-punctuation">}</span>`}}),nr=new y({}),or=new w({props:{code:`{ "gradient_accumulation_steps": "auto" }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span>`}}),lr=new w({props:{code:`{ "gradient_accumulation_steps": 3 }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span> <span class="hljs-punctuation">}</span>`}}),rr=new y({}),pr=new w({props:{code:`{ "gradient_clipping": "auto" }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span>`}}),ir=new w({props:{code:`{ "gradient_clipping": 1.0 }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1.0</span> <span class="hljs-punctuation">}</span>`}}),ur=new y({}),cr=new w({props:{code:`{ "zero_optimization": { "stage3_gather_16bit_weights_on_model_save": true } }`,highlighted:`<span 
class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),fr=new w({props:{code:`from transformers.trainer_utils import get_last_checkpoint from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)`,highlighted:`<span class="hljs-keyword">from</span> transformers.trainer_utils <span class="hljs-keyword">import</span> get_last_checkpoint <span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)`}}),dr=new w({props:{code:`from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint checkpoint_dir = os.path.join(trainer.args.output_dir, "checkpoint-final") trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)`,highlighted:`<span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = os.path.join(trainer.args.output_dir, <span class="hljs-string">&quot;checkpoint-final&quot;</span>) trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)`}}),Qa=new yU({props:{$$slots:{default:[CY]},$$scope:{ctx:jt}}}),mr=new w({props:{code:`from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu model = model.cpu() model.load_state_dict(state_dict)`,highlighted:`<span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) <span class="hljs-comment"># already on cpu</span> model = model.cpu() model.load_state_dict(state_dict)`}}),_r=new w({props:{code:`$ ls -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt -rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py*`,highlighted:`$ <span class="hljs-built_in">ls</span> -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt 
-rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py*`}}),vr=new w({props:{code:"python zero_to_fp32.py . pytorch_model.bin",highlighted:"python zero_to_fp32.py . pytorch_model.bin"}}),wr=new y({}),yr=new y({}),gr=new w({props:{code:`from transformers import T5ForConditionalGeneration, T5Config import deepspeed with deepspeed.zero.Init(): config = T5Config.from_pretrained("t5-small") model = T5ForConditionalGeneration(config)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration, T5Config <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">with</span> deepspeed.zero.Init(): config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration(config)`}}),br=new w({props:{code:`from transformers import AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained("t5-small") trainer = Trainer(model=model, args=training_args, ...)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) trainer = Trainer(model=model, args=training_args, ...)`}}),Er=new y({}),Pr=new w({props:{code:'tensor([1.0], device="cuda:0", dtype=torch.float16, requires_grad=True)',highlighted:'tensor([<span class="hljs-number">1.0</span>], device=<span class="hljs-string">&quot;cuda:0&quot;</span>, dtype=torch.float16, requires_grad=<span class="hljs-literal">True</span>)'}}),zr=new y({}),Dr=new w({props:{code:"deepspeed --num_gpus=2 your_program.py <normal cl args> --do_eval --deepspeed ds_config.json",highlighted:"deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --do_eval --deepspeed ds_config.json"}}),Or=new w({props:{code:`deepspeed examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero3.json \\ --model_name_or_path t5-small --output_dir output_dir \\ --do_eval --max_eval_samples 50 --warmup_steps 50 \\ --max_source_length 128 --val_max_target_length 128 \\ --overwrite_output_dir --per_device_eval_batch_size 4 \\ --predict_with_generate --dataset_config "ro-en" --fp16 \\ --source_lang en --target_lang ro --dataset_name wmt16 \\ --source_prefix "translate English to Romanian: "`,highlighted:`deepspeed examples/pytorch/translation/run_translation.py \\ --deepspeed tests/deepspeed/ds_config_zero3.json \\ --model_name_or_path t5-small --output_dir output_dir \\ --do_eval --max_eval_samples 50 --warmup_steps 50 \\ --max_source_length 128 --val_max_target_length 128 \\ --overwrite_output_dir --per_device_eval_batch_size 4 \\ --predict_with_generate --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> --fp16 \\ --source_lang en --target_lang ro --dataset_name wmt16 \\ --source_prefix <span class="hljs-string">&quot;translate English to Romanian: &quot;</span>`}}),Ar=new y({}),Tr=new 
w({props:{code:`$ python -c 'from transformers import AutoModel; \\ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \\ model = AutoModel.from_pretrained("bigscience/T0_3B"); \\ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)' [...] Estimated memory needed for params, optim states and gradients for a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0`,highlighted:`$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \\ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \\ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \\ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)&#x27;</span> [...] Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0`}}),Sr=new w({props:{code:`$ python -c 'from transformers import AutoModel; \\ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \\ model = AutoModel.from_pretrained("bigscience/T0_3B"); \\ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)' [...] Estimated memory needed for params, optim states and gradients for a: HW: Setup with 1 node, 2 GPUs per node. SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 `,highlighted:`$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \\ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \\ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \\ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)&#x27;</span> [...] 
Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 2 GPUs per node. SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 `}}),xr=new y({}),Ir=new w({props:{code:`python -c 'import torch; print(f"torch: {torch.__version__}")' python -c 'import transformers; print(f"transformers: {transformers.__version__}")' python -c 'import deepspeed; print(f"deepspeed: {deepspeed.__version__}")'`,highlighted:`python -c <span class="hljs-string">&#x27;import torch; print(f&quot;torch: {torch.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import transformers; print(f&quot;transformers: {transformers.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import deepspeed; print(f&quot;deepspeed: {deepspeed.__version__}&quot;)&#x27;</span>`}}),Hr=new y({}),Br=new y({}),Vr=new y({}),Yr=new w({props:{code:`{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 } }`,highlighted:`<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span>`}}),Kr=new w({props:{code:`0%| | 0/189 [00:00<?, ?it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 262144 1%|\u258C | 1/189 [00:00<01:26, 2.17it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144, reducing to 131072.0 1%|\u2588\u258F [...] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 14%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258C | 27/189 [00:14<01:13, 2.21it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. 
Attempted loss scale: 1, reducing to 1 15%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258F | 28/189 [00:14<01:13, 2.18it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 15%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258A | 29/189 [00:15<01:13, 2.18it/s] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1, reducing to 1 [...]`,highlighted:`<span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;?, ?it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">262144</span> <span class="hljs-number">1</span>%|\u258C | <span class="hljs-number">1</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">26</span>, <span class="hljs-number">2.17</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">131072.0</span> <span class="hljs-number">1</span>%|\u2588\u258F [...] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">14</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258C | <span class="hljs-number">27</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.21</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258F | <span class="hljs-number">28</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. 
Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258A | <span class="hljs-number">29</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">15</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> [...]`}}),Jr=new y({}),sp=new y({}),tp=new w({props:{code:`from transformers.deepspeed import HfDeepSpeedConfig from transformers import AutoModel import deepspeed ds_config = {...} # deepspeed config object or path to the file # must run before instantiating the model to detect zero 3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive model = AutoModel.from_pretrained("gpt2") engine = deepspeed.initialize(model=model, config_params=ds_config, ...)`,highlighted:`<span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> model = AutoModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)`}}),ap=new w({props:{code:`from transformers.deepspeed import HfDeepSpeedConfig from transformers import AutoModel, AutoConfig import deepspeed ds_config = {...} # deepspeed config object or path to the file # must run before instantiating the model to detect zero 3 dschf = HfDeepSpeedConfig(ds_config) # keep this object alive config = AutoConfig.from_pretrained("gpt2") model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)`,highlighted:`<span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoConfig <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)`}}),op=new y({}),lp=new AY({props:{name:"class 
transformers.deepspeed.HfDeepSpeedConfig",anchor:"transformers.deepspeed.HfDeepSpeedConfig",parameters:[{name:"config_file_or_dict",val:""}],parametersDescription:[{anchor:"transformers.deepspeed.HfDeepSpeedConfig.config_file_or_dict",description:"<strong>config_file_or_dict</strong> (<code>Union[str, Dict]</code>) &#x2014; path to DeepSpeed config file or dict.",name:"config_file_or_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/deepspeed.py#L45"}}),rp=new y({}),pp=new w({props:{code:`#!/usr/bin/env python # This script demonstrates how to use Deepspeed ZeRO in an inference mode when one can't fit a model # into a single GPU # # 1. Use 1 GPU with CPU offload # 2. Or use multiple GPUs instead # # First you need to install deepspeed: pip install deepspeed # # Here we use a 3B "bigscience/T0_3B" model which needs about 15GB GPU RAM - so 1 largish or 2 # small GPUs can handle it. or 1 small GPU and a lot of CPU memory. # # To use a larger model like "bigscience/T0" which needs about 50GB, unless you have an 80GB GPU - # you will need 2-4 gpus. And then you can adapt the script to handle more gpus if you want to # process multiple inputs at once. # # The provided deepspeed config also activates CPU memory offloading, so chances are that if you # have a lot of available CPU memory and you don't mind a slowdown you should be able to load a # model that doesn't normally fit into a single GPU. If you have enough GPU memory the program will # run faster if you don't want offload to CPU - so disable that section then. # # To deploy on 1 gpu: # # deepspeed --num_gpus 1 t0.py # or: # python -m torch.distributed.run --nproc_per_node=1 t0.py # # To deploy on 2 gpus: # # deepspeed --num_gpus 2 t0.py # or: # python -m torch.distributed.run --nproc_per_node=2 t0.py from transformers import AutoTokenizer, AutoConfig, AutoModelForSeq2SeqLM from transformers.deepspeed import HfDeepSpeedConfig import deepspeed import os import torch os.environ["TOKENIZERS_PARALLELISM"] = "false" # To avoid warnings about parallelism in tokenizers # distributed setup local_rank = int(os.getenv("LOCAL_RANK", "0")) world_size = int(os.getenv("WORLD_SIZE", "1")) torch.cuda.set_device(local_rank) deepspeed.init_distributed() model_name = "bigscience/T0_3B" config = AutoConfig.from_pretrained(model_name) model_hidden_size = config.d_model # batch size has to be divisible by world_size, but can be bigger than world_size train_batch_size = 1 * world_size # ds_config notes # # - enable bf16 if you use Ampere or higher GPU - this will run in mixed precision and will be # faster. # # - for older GPUs you can enable fp16, but it'll only work for non-bf16 pretrained models - e.g. 
# all official t5 models are bf16-pretrained # # - set offload_param.device to "none" or completely remove the \`offload_param\` section if you don't # - want CPU offload # # - if using \`offload_param\` you can manually finetune stage3_param_persistence_threshold to control # - which params should remain on gpus - the larger the value the smaller the offload size # # For indepth info on Deepspeed config see # https://huggingface.co/docs/transformers/main/main_classes/deepspeed # keeping the same format as json for consistency, except it uses lower case for true/false # fmt: off ds_config = { "fp16": { "enabled": False }, "bf16": { "enabled": False }, "zero_optimization": { "stage": 3, "offload_param": { "device": "cpu", "pin_memory": True }, "overlap_comm": True, "contiguous_gradients": True, "reduce_bucket_size": model_hidden_size * model_hidden_size, "stage3_prefetch_bucket_size": 0.9 * model_hidden_size * model_hidden_size, "stage3_param_persistence_threshold": 10 * model_hidden_size }, "steps_per_print": 2000, "train_batch_size": train_batch_size, "train_micro_batch_size_per_gpu": 1, "wall_clock_breakdown": False } # fmt: on # next line instructs transformers to partition the model directly over multiple gpus using # deepspeed.zero.Init when model's \`from_pretrained\` method is called. # # **it has to be run before loading the model AutoModelForSeq2SeqLM.from_pretrained(model_name)** # # otherwise the model will first be loaded normally and only partitioned at forward time which is # less efficient and when there is little CPU RAM may fail dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # now a model can be loaded. model = AutoModelForSeq2SeqLM.from_pretrained(model_name) # initialise Deepspeed ZeRO and store only the engine object ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0] ds_engine.module.eval() # inference # Deepspeed ZeRO can process unrelated inputs on each GPU. So for 2 gpus you process 2 inputs at once. # If you use more GPUs adjust for more. # And of course if you have just one input to process you then need to pass the same string to both gpus # If you use only one GPU, then you will have only rank 0. rank = torch.distributed.get_rank() if rank == 0: text_in = "Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy" elif rank == 1: text_in = "Is this review positive or negative? Review: this is the worst restaurant ever" tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer.encode(text_in, return_tensors="pt").to(device=local_rank) with torch.no_grad(): outputs = ds_engine.module.generate(inputs, synced_gpus=True) text_out = tokenizer.decode(outputs[0], skip_special_tokens=True) print(f"rank{rank}:\\n in={text_in}\\n out={text_out}")`,highlighted:`<span class="hljs-comment">#!/usr/bin/env python</span> <span class="hljs-comment"># This script demonstrates how to use Deepspeed ZeRO in an inference mode when one can&#x27;t fit a model</span> <span class="hljs-comment"># into a single GPU</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># 1. Use 1 GPU with CPU offload</span> <span class="hljs-comment"># 2. 
Or use multiple GPUs instead</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># First you need to install deepspeed: pip install deepspeed</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># Here we use a 3B &quot;bigscience/T0_3B&quot; model which needs about 15GB GPU RAM - so 1 largish or 2</span> <span class="hljs-comment"># small GPUs can handle it. or 1 small GPU and a lot of CPU memory.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To use a larger model like &quot;bigscience/T0&quot; which needs about 50GB, unless you have an 80GB GPU -</span> <span class="hljs-comment"># you will need 2-4 gpus. And then you can adapt the script to handle more gpus if you want to</span> <span class="hljs-comment"># process multiple inputs at once.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># The provided deepspeed config also activates CPU memory offloading, so chances are that if you</span> <span class="hljs-comment"># have a lot of available CPU memory and you don&#x27;t mind a slowdown you should be able to load a</span> <span class="hljs-comment"># model that doesn&#x27;t normally fit into a single GPU. If you have enough GPU memory the program will</span> <span class="hljs-comment"># run faster if you don&#x27;t want offload to CPU - so disable that section then.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 1 gpu:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 1 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=1 t0.py</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 2 gpus:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 2 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=2 t0.py</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoConfig, AutoModelForSeq2SeqLM <span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">import</span> os <span class="hljs-keyword">import</span> torch os.environ[<span class="hljs-string">&quot;TOKENIZERS_PARALLELISM&quot;</span>] = <span class="hljs-string">&quot;false&quot;</span> <span class="hljs-comment"># To avoid warnings about parallelism in tokenizers</span> <span class="hljs-comment"># distributed setup</span> local_rank = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>, <span class="hljs-string">&quot;0&quot;</span>)) world_size = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>, <span class="hljs-string">&quot;1&quot;</span>)) torch.cuda.set_device(local_rank) deepspeed.init_distributed() model_name = <span class="hljs-string">&quot;bigscience/T0_3B&quot;</span> config = AutoConfig.from_pretrained(model_name) model_hidden_size = config.d_model <span class="hljs-comment"># batch size has to be divisible by world_size, but can be bigger than world_size</span> train_batch_size = <span class="hljs-number">1</span> * world_size <span class="hljs-comment"># ds_config notes</span> <span class="hljs-comment">#</span> <span 
class="hljs-comment"># - enable bf16 if you use Ampere or higher GPU - this will run in mixed precision and will be</span> <span class="hljs-comment"># faster.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - for older GPUs you can enable fp16, but it&#x27;ll only work for non-bf16 pretrained models - e.g.</span> <span class="hljs-comment"># all official t5 models are bf16-pretrained</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - set offload_param.device to &quot;none&quot; or completely remove the \`offload_param\` section if you don&#x27;t</span> <span class="hljs-comment"># - want CPU offload</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - if using \`offload_param\` you can manually finetune stage3_param_persistence_threshold to control</span> <span class="hljs-comment"># - which params should remain on gpus - the larger the value the smaller the offload size</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># For indepth info on Deepspeed config see</span> <span class="hljs-comment"># https://huggingface.co/docs/transformers/main/main_classes/deepspeed</span> <span class="hljs-comment"># keeping the same format as json for consistency, except it uses lower case for true/false</span> <span class="hljs-comment"># fmt: off</span> ds_config = { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-literal">False</span> }, <span class="hljs-string">&quot;bf16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-literal">False</span> }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: <span class="hljs-literal">True</span> }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-number">0.9</span> * model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-number">10</span> * model_hidden_size }, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: train_batch_size, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: <span class="hljs-literal">False</span> } <span class="hljs-comment"># fmt: on</span> <span class="hljs-comment"># next line instructs transformers to partition the model directly over multiple gpus using</span> <span class="hljs-comment"># deepspeed.zero.Init when model&#x27;s \`from_pretrained\` method is called.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># **it has to be run before loading the model AutoModelForSeq2SeqLM.from_pretrained(model_name)**</span> <span 
class="hljs-comment">#</span> <span class="hljs-comment"># otherwise the model will first be loaded normally and only partitioned at forward time which is</span> <span class="hljs-comment"># less efficient and when there is little CPU RAM may fail</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> <span class="hljs-comment"># now a model can be loaded.</span> model = AutoModelForSeq2SeqLM.from_pretrained(model_name) <span class="hljs-comment"># initialise Deepspeed ZeRO and store only the engine object</span> ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[<span class="hljs-number">0</span>] ds_engine.module.<span class="hljs-built_in">eval</span>() <span class="hljs-comment"># inference</span> <span class="hljs-comment"># Deepspeed ZeRO can process unrelated inputs on each GPU. So for 2 gpus you process 2 inputs at once.</span> <span class="hljs-comment"># If you use more GPUs adjust for more.</span> <span class="hljs-comment"># And of course if you have just one input to process you then need to pass the same string to both gpus</span> <span class="hljs-comment"># If you use only one GPU, then you will have only rank 0.</span> rank = torch.distributed.get_rank() <span class="hljs-keyword">if</span> rank == <span class="hljs-number">0</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy&quot;</span> <span class="hljs-keyword">elif</span> rank == <span class="hljs-number">1</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? Review: this is the worst restaurant ever&quot;</span> tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer.encode(text_in, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).to(device=local_rank) <span class="hljs-keyword">with</span> torch.no_grad(): outputs = ds_engine.module.generate(inputs, synced_gpus=<span class="hljs-literal">True</span>) text_out = tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;rank<span class="hljs-subst">{rank}</span>:\\n in=<span class="hljs-subst">{text_in}</span>\\n out=<span class="hljs-subst">{text_out}</span>&quot;</span>)`}}),ip=new w({props:{code:`$ deepspeed --num_gpus 2 t0.py rank0: in=Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy out=Positive rank1: in=Is this review positive or negative? Review: this is the worst restaurant ever out=negative`,highlighted:`$ deepspeed --num_gpus <span class="hljs-number">2</span> t0.py rank0: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the best cast iron skillet you will ever buy <span class="hljs-keyword">out</span>=Positive rank1: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? 
Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the worst restaurant ever <span class="hljs-keyword">out</span>=negative`}}),up=new y({}),{c(){g=o("meta"),S=u(),b=o("h1"),k=o("a"),X=o("span"),f(z.$$.fragment),C=u(),Q=o("span"),x=a("DeepSpeed Integration"),te=u(),T=o("p"),q=o("a"),E=a("DeepSpeed"),gs=a(" implements everything described in the "),W=o("a"),bs=a("ZeRO paper"),i4=a(". Currently it provides full support for:"),wj=u(),R=o("ol"),Gh=o("li"),u4=a("Optimizer state partitioning (ZeRO stage 1)"),c4=u(),Mh=o("li"),h4=a("Gradient partitioning (ZeRO stage 2)"),f4=u(),Lh=o("li"),d4=a("Parameter partitioning (ZeRO stage 3)"),m4=u(),Zh=o("li"),_4=a("Custom mixed precision training handling"),v4=u(),Nh=o("li"),j4=a("A range of fast CUDA-extension-based optimizers"),w4=u(),Hh=o("li"),y4=a("ZeRO-Offload to CPU and NVMe"),yj=u(),Te=o("p"),g4=a("ZeRO-Offload has its own dedicated paper: "),Kn=o("a"),b4=a("ZeRO-Offload: Democratizing Billion-Scale Model Training"),q4=a(". And NVMe-support is described in the paper "),Jn=o("a"),E4=a(`ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning`),$4=a("."),gj=u(),kp=o("p"),k4=a("DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference."),bj=u(),Pp=o("p"),P4=a(`DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which won\u2019t be possible on a single GPU.`),qj=u(),wt=o("p"),z4=a("\u{1F917} Transformers integrates "),Xn=o("a"),D4=a("DeepSpeed"),O4=a(" via 2 options:"),Ej=u(),yt=o("ol"),Qn=o("li"),A4=a("Integration of the core DeepSpeed features via "),zp=o("a"),T4=a("Trainer"),S4=a(`. This is an everything-done-for-you type of integration - just supply your custom config file or use our template and you have nothing else to do. Most of this document is focused on this feature.`),C4=u(),F=o("li"),x4=a("If you don\u2019t use "),Dp=o("a"),R4=a("Trainer"),I4=a(` and want to use your own Trainer where you integrated DeepSpeed yourself, core functionality functions like `),Bh=o("code"),U4=a("from_pretrained"),G4=a(" and "),Wh=o("code"),M4=a("from_config"),L4=a(` include integration of essential parts of DeepSpeed like `),Fh=o("code"),Z4=a("zero.Init"),N4=a(` for ZeRO stage 3 and higher. To tap into this feature read the docs on `),Op=o("a"),H4=a("non-Trainer DeepSpeed Integration"),B4=a("."),$j=u(),Ap=o("p"),W4=a("What is integrated:"),kj=u(),Tp=o("p"),F4=a("Training:"),Pj=u(),Sp=o("ol"),Vh=o("li"),V4=a("DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload)."),zj=u(),Cp=o("p"),Y4=a("Inference:"),Dj=u(),xp=o("ol"),eo=o("li"),K4=a(`DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn\u2019t use an optimizer and a lr scheduler and only stage 3 is relevant. 
For more details see: `),Rp=o("a"),J4=a("zero-inference"),X4=a("."),Oj=u(),Ip=o("p"),Q4=a(`There is also DeepSpeed Inference - this is a totally different technology which uses Tensor Parallelism instead of ZeRO (coming soon).`),Aj=u(),Up=o("a"),Tj=u(),qs=o("h2"),gt=o("a"),Yh=o("span"),f(so.$$.fragment),e6=u(),Kh=o("span"),s6=a("Trainer Deepspeed Integration"),Sj=u(),Gp=o("a"),Cj=u(),Es=o("h3"),bt=o("a"),Jh=o("span"),f(to.$$.fragment),t6=u(),Xh=o("span"),a6=a("Installation"),xj=u(),Mp=o("p"),n6=a("Install the library via pypi:"),Rj=u(),f(ao.$$.fragment),Ij=u(),Se=o("p"),o6=a("or via "),Qh=o("code"),l6=a("transformers"),r6=a("\u2019 "),ef=o("code"),p6=a("extras"),i6=a(":"),Uj=u(),f(no.$$.fragment),Gj=u(),Ce=o("p"),u6=a("or find more details on "),oo=o("a"),c6=a("the DeepSpeed\u2019s GitHub page"),h6=a(` and `),lo=o("a"),f6=a("advanced install"),d6=a("."),Mj=u(),qt=o("p"),m6=a("If you\u2019re still struggling with the build, first make sure to read "),Lp=o("a"),_6=a("CUDA Extension Installation Notes"),v6=a("."),Lj=u(),Zp=o("p"),j6=a(`If you don\u2019t prebuild the extensions and rely on them to be built at run time and you tried all of the above solutions to no avail, the next thing to try is to pre-build the modules before installing them.`),Zj=u(),Np=o("p"),w6=a("To make a local build for DeepSpeed:"),Nj=u(),f(ro.$$.fragment),Hj=u(),xe=o("p"),y6=a("If you intend to use NVMe offload you will also need to include "),sf=o("code"),g6=a("DS_BUILD_AIO=1"),b6=a(` in the instructions above (and also install `),tf=o("em"),q6=a("libaio-dev"),E6=a(" system-wide)."),Bj=u(),Et=o("p"),$6=a("Edit "),af=o("code"),k6=a("TORCH_CUDA_ARCH_LIST"),P6=a(` to insert the code for the architectures of the GPU cards you intend to use. Assuming all your cards are the same you can get the arch via:`),Wj=u(),f(po.$$.fragment),Fj=u(),ke=o("p"),z6=a("So if you get "),nf=o("code"),D6=a("8, 6"),O6=a(", then use "),of=o("code"),A6=a('TORCH_CUDA_ARCH_LIST="8.6"'),T6=a(`. If you have multiple different cards, you can list all of them like so `),lf=o("code"),S6=a('TORCH_CUDA_ARCH_LIST="6.1;8.6"'),Vj=u(),Hp=o("p"),C6=a("If you need to use the same setup on multiple machines, make a binary wheel:"),Yj=u(),f(io.$$.fragment),Kj=u(),Re=o("p"),x6=a("it will generate something like "),rf=o("code"),R6=a("dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl"),I6=a(` which now you can install as `),pf=o("code"),U6=a("pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl"),G6=a(" locally or on any other machine."),Jj=u(),$t=o("p"),M6=a("Again, remember to ensure to adjust "),uf=o("code"),L6=a("TORCH_CUDA_ARCH_LIST"),Z6=a(" to the target architectures."),Xj=u(),Ie=o("p"),N6=a("You can find the complete list of NVIDIA GPUs and their corresponding "),cf=o("strong"),H6=a("Compute Capabilities"),B6=a(` (same as arch in this context) `),uo=o("a"),W6=a("here"),F6=a("."),Qj=u(),Bp=o("p"),V6=a("You can check the archs pytorch was built with using:"),ew=u(),f(co.$$.fragment),sw=u(),Wp=o("p"),Y6=a("Here is how to find out the arch for one of the installed GPUs. For example, for GPU 0:"),tw=u(),f(ho.$$.fragment),aw=u(),Fp=o("p"),K6=a("If the output is:"),nw=u(),f(fo.$$.fragment),ow=u(),kt=o("p"),J6=a("then you know that this card\u2019s arch is "),hf=o("code"),X6=a("8.6"),Q6=a("."),lw=u(),Pt=o("p"),e$=a("You can also leave "),ff=o("code"),s$=a("TORCH_CUDA_ARCH_LIST"),t$=a(` out completely and then the build program will automatically query the architecture of the GPUs the build is made on. 
This may or may not match the GPUs on the target machines, that\u2019s why it\u2019s best to specify the desired archs explicitly.`),rw=u(),zt=o("p"),a$=a(`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),mo=o("a"),n$=a("Deepspeed"),o$=a(","),pw=u(),Vp=o("a"),iw=u(),$s=o("h3"),Dt=o("a"),df=o("span"),f(_o.$$.fragment),l$=u(),mf=o("span"),r$=a("Deployment with multiple GPUs"),uw=u(),Ot=o("p"),p$=a("To deploy this feature with multiple GPUs adjust the "),Yp=o("a"),i$=a("Trainer"),u$=a(` command line arguments as following:`),cw=u(),At=o("ol"),ks=o("li"),c$=a("replace "),_f=o("code"),h$=a("python -m torch.distributed.launch"),f$=a(" with "),vf=o("code"),d$=a("deepspeed"),m$=a("."),_$=u(),Pe=o("li"),v$=a("add a new argument "),jf=o("code"),j$=a("--deepspeed ds_config.json"),w$=a(", where "),wf=o("code"),y$=a("ds_config.json"),g$=a(` is the DeepSpeed configuration file as documented `),vo=o("a"),b$=a("here"),q$=a(". The file naming is up to you."),hw=u(),Kp=o("p"),E$=a("Therefore, if your original command line looked as follows:"),fw=u(),f(jo.$$.fragment),dw=u(),Jp=o("p"),$$=a("Now it should be:"),mw=u(),f(wo.$$.fragment),_w=u(),I=o("p"),k$=a("Unlike, "),yf=o("code"),P$=a("torch.distributed.launch"),z$=a(" where you have to specify how many GPUs to use with "),gf=o("code"),D$=a("--nproc_per_node"),O$=a(`, with the `),bf=o("code"),A$=a("deepspeed"),T$=a(" launcher you don\u2019t have to use the corresponding "),qf=o("code"),S$=a("--num_gpus"),C$=a(` if you want all of your GPUs used. The full details on how to configure various nodes and GPUs can be found `),yo=o("a"),x$=a("here"),R$=a("."),vw=u(),ae=o("p"),I$=a("In fact, you can continue using "),Ef=o("code"),U$=a("-m torch.distributed.launch"),G$=a(` with DeepSpeed as long as you don\u2019t need to use `),$f=o("code"),M$=a("deepspeed"),L$=a(` launcher-specific arguments. Typically if you don\u2019t need a multi-node setup you\u2019re not required to use the `),kf=o("code"),Z$=a("deepspeed"),N$=a(` launcher. But since in the DeepSpeed documentation it\u2019ll be used everywhere, for consistency we will use it here as well.`),jw=u(),Tt=o("p"),H$=a("Here is an example of running "),Pf=o("code"),B$=a("run_translation.py"),W$=a(" under DeepSpeed deploying all available GPUs:"),ww=u(),f(go.$$.fragment),yw=u(),St=o("p"),F$=a("Note that in the DeepSpeed documentation you are likely to see "),zf=o("code"),V$=a("--deepspeed --deepspeed_config ds_config.json"),Y$=a(` - i.e. two DeepSpeed-related arguments, but for the sake of simplicity, and since there are already so many arguments to deal with, we combined the two into a single argument.`),gw=u(),Ct=o("p"),K$=a("For some practical usage examples, please, see this "),bo=o("a"),J$=a("post"),X$=a("."),bw=u(),Xp=o("a"),qw=u(),Ps=o("h3"),xt=o("a"),Df=o("span"),f(qo.$$.fragment),Q$=u(),Of=o("span"),e5=a("Deployment with one GPU"),Ew=u(),Rt=o("p"),s5=a("To deploy DeepSpeed with one GPU adjust the "),Qp=o("a"),t5=a("Trainer"),a5=a(" command line arguments as follows:"),$w=u(),f(Eo.$$.fragment),kw=u(),Ue=o("p"),n5=a(`This is almost the same as with multiple-GPUs, but here we tell DeepSpeed explicitly to use just one GPU via `),Af=o("code"),o5=a("--num_gpus=1"),l5=a(`. By default, DeepSpeed deploys all GPUs it can see on the given node. If you have only 1 GPU to start with, then you don\u2019t need this argument. 
The following `),$o=o("a"),r5=a("documentation"),p5=a(" discusses the launcher options."),Pw=u(),ei=o("p"),i5=a("Why would you want to use DeepSpeed with just one GPU?"),zw=u(),It=o("ol"),Tf=o("li"),u5=a(`It has a ZeRO-offload feature which can delegate some computations and memory to the host\u2019s CPU and RAM, and thus leave more GPU resources for model\u2019s needs - e.g. larger batch size, or enabling a fitting of a very big model which normally won\u2019t fit.`),c5=u(),Sf=o("li"),h5=a(`It provides a smart GPU memory management system, that minimizes memory fragmentation, which again allows you to fit bigger models and data batches.`),Dw=u(),si=o("p"),f5=a(`While we are going to discuss the configuration in details next, the key to getting a huge improvement on a single GPU with DeepSpeed is to have at least the following configuration in the configuration file:`),Ow=u(),f(ko.$$.fragment),Aw=u(),ti=o("p"),d5=a(`which enables optimizer offload and some other important features. You may experiment with the buffer sizes, you will find more details in the discussion below.`),Tw=u(),Ut=o("p"),m5=a("For a practical usage example of this type of deployment, please, see this "),Po=o("a"),_5=a("post"),v5=a("."),Sw=u(),ai=o("p"),j5=a("You may also try the ZeRO-3 with CPU and NVMe offload as explained further in this document."),Cw=u(),ni=o("p"),w5=a("Notes:"),xw=u(),oi=o("ul"),zs=o("li"),zo=o("p"),y5=a("if you need to run on a specific GPU, which is different from GPU 0, you can\u2019t use "),Cf=o("code"),g5=a("CUDA_VISIBLE_DEVICES"),b5=a(` to limit the visible scope of available GPUs. Instead, you have to use the following syntax:`),q5=u(),f(Do.$$.fragment),E5=u(),xf=o("p"),$5=a("In this example, we tell DeepSpeed to use GPU 1 (second gpu)."),Rw=u(),li=o("a"),Iw=u(),Ds=o("h3"),Gt=o("a"),Rf=o("span"),f(Oo.$$.fragment),k5=u(),If=o("span"),P5=a("Deployment in Notebooks"),Uw=u(),Mt=o("p"),z5=a("The problem with running notebook cells as a script is that there is no normal "),Uf=o("code"),D5=a("deepspeed"),O5=a(` launcher to rely on, so under certain setups we have to emulate it.`),Gw=u(),ri=o("p"),A5=a("If you\u2019re using only 1 GPU, here is how you\u2019d have to adjust your training code in the notebook to use DeepSpeed."),Mw=u(),f(Ao.$$.fragment),Lw=u(),Lt=o("p"),T5=a("Note: "),Gf=o("code"),S5=a("..."),C5=a(" stands for the normal arguments that you\u2019d pass to the functions."),Zw=u(),pi=o("p"),x5=a(`If you want to use more than 1 GPU, you must use a multi-process environment for DeepSpeed to work. That is, you have to use the launcher for that purpose and this cannot be accomplished by emulating the distributed environment presented at the beginning of this section.`),Nw=u(),ii=o("p"),R5=a(`If you want to create the config file on the fly in the notebook in the current directory, you could have a dedicated cell with:`),Hw=u(),f(To.$$.fragment),Bw=u(),Ge=o("p"),I5=a("If the training script is in a normal file and not in the notebook cells, you can launch "),Mf=o("code"),U5=a("deepspeed"),G5=a(` normally via shell from a cell. 
For example, to use `),Lf=o("code"),M5=a("run_translation.py"),L5=a(" you would launch it with:"),Ww=u(),f(So.$$.fragment),Fw=u(),Zt=o("p"),Z5=a("or with "),Zf=o("code"),N5=a("%%bash"),H5=a(" magic, where you can write a multi-line code for the shell program to run:"),Vw=u(),f(Co.$$.fragment),Yw=u(),ui=o("p"),B5=a("In such case you don\u2019t need any of the code presented at the beginning of this section."),Kw=u(),Nt=o("p"),W5=a("Note: While "),Nf=o("code"),F5=a("%%bash"),V5=a(` magic is neat, but currently it buffers the output so you won\u2019t see the logs until the process completes.`),Jw=u(),ci=o("a"),Xw=u(),Os=o("h3"),Ht=o("a"),Hf=o("span"),f(xo.$$.fragment),Y5=u(),Bf=o("span"),K5=a("Configuration"),Qw=u(),Bt=o("p"),J5=a(`For the complete guide to the DeepSpeed configuration options that can be used in its configuration file please refer to the `),Ro=o("a"),X5=a("following documentation"),Q5=a("."),ey=u(),Wt=o("p"),e9=a("You can find dozens of DeepSpeed configuration examples that address various practical needs in "),Io=o("a"),s9=a(`the DeepSpeedExamples repo`),t9=a(":"),sy=u(),f(Uo.$$.fragment),ty=u(),Ft=o("p"),a9=a(`Continuing the code from above, let\u2019s say you\u2019re looking to configure the Lamb optimizer. So you can search through the example `),Wf=o("code"),n9=a(".json"),o9=a(" files with:"),ay=u(),f(Go.$$.fragment),ny=u(),Vt=o("p"),l9=a("Some more examples are to be found in the "),Mo=o("a"),r9=a("main repo"),p9=a(" as well."),oy=u(),hi=o("p"),i9=a(`When using DeepSpeed you always need to supply a DeepSpeed configuration file, yet some configuration parameters have to be configured via the command line. You will find the nuances in the rest of this guide.`),ly=u(),ne=o("p"),u9=a(`To get an idea of what DeepSpeed configuration file looks like, here is one that activates ZeRO stage 2 features, including optimizer states cpu offload, uses `),Ff=o("code"),c9=a("AdamW"),h9=a(" optimizer and "),Vf=o("code"),f9=a("WarmupLR"),d9=a(` scheduler and will enable mixed precision training if `),Yf=o("code"),m9=a("--fp16"),_9=a(" is passed:"),ry=u(),f(Lo.$$.fragment),py=u(),Yt=o("p"),v9=a("When you execute the program, DeepSpeed will log the configuration it received from the "),fi=o("a"),j9=a("Trainer"),w9=a(` to the console, so you can see exactly what was the final configuration passed to it.`),iy=u(),di=o("a"),uy=u(),As=o("h3"),Kt=o("a"),Kf=o("span"),f(Zo.$$.fragment),y9=u(),Jf=o("span"),g9=a("Passing Configuration"),cy=u(),U=o("p"),b9=a(`As discussed in this document normally the DeepSpeed configuration is passed as a path to a json file, but if you\u2019re not using the command line interface to configure the training, and instead instantiate the `),mi=o("a"),q9=a("Trainer"),E9=a(" via "),_i=o("a"),$9=a("TrainingArguments"),k9=a(" then for the "),Xf=o("code"),P9=a("deepspeed"),z9=a(` argument you can pass a nested `),Qf=o("code"),D9=a("dict"),O9=a(`. 
This allows you to create the configuration on the fly and doesn\u2019t require you to write it to the file system before passing it to `),vi=o("a"),A9=a("TrainingArguments"),T9=a("."),hy=u(),ji=o("p"),S9=a("To summarize you can do:"),fy=u(),f(No.$$.fragment),dy=u(),wi=o("p"),C9=a("or:"),my=u(),f(Ho.$$.fragment),_y=u(),yi=o("a"),vy=u(),Ts=o("h3"),Jt=o("a"),ed=o("span"),f(Bo.$$.fragment),x9=u(),sd=o("span"),R9=a("Shared Configuration"),jy=u(),f(Xt.$$.fragment),wy=u(),Me=o("p"),I9=a("Some configuration values are required by both the "),gi=o("a"),U9=a("Trainer"),G9=a(` and DeepSpeed to function correctly, therefore, to prevent conflicting definitions, which could lead to hard to detect errors, we chose to configure those via the `),bi=o("a"),M9=a("Trainer"),L9=a(" command line arguments."),yy=u(),Qt=o("p"),Z9=a(`Additionally, some configuration values are derived automatically based on the model\u2019s configuration, so instead of remembering to manually adjust multiple values, it\u2019s the best to let the `),qi=o("a"),N9=a("Trainer"),H9=a(` do the majority of configuration for you.`),gy=u(),Le=o("p"),B9=a("Therefore, in the rest of this guide you will find a special configuration value: "),td=o("code"),W9=a("auto"),F9=a(`, which when set will be automatically replaced with the correct or most efficient value. Please feel free to choose to ignore this recommendation and set the values explicitly, in which case be very careful that your the `),Ei=o("a"),V9=a("Trainer"),Y9=a(` arguments and DeepSpeed configurations agree. For example, are you using the same learning rate, or batch size, or gradient accumulation settings? if these mismatch the training may fail in very difficult to detect ways. You have been warned.`),by=u(),$i=o("p"),K9=a(`There are multiple other values that are specific to DeepSpeed-only and those you will have to set manually to suit your needs.`),qy=u(),ea=o("p"),J9=a(`In your own programs, you can also use the following approach if you\u2019d like to modify the DeepSpeed config as a master and configure `),ki=o("a"),X9=a("TrainingArguments"),Q9=a(" based on that. The steps are:"),Ey=u(),sa=o("ol"),ad=o("li"),e8=a("Create or load the DeepSpeed configuration to be used as a master configuration"),s8=u(),Wo=o("li"),t8=a("Create the "),Pi=o("a"),a8=a("TrainingArguments"),n8=a(" object based on these values"),$y=u(),oe=o("p"),o8=a("Do note that some values, such as "),nd=o("code"),l8=a("scheduler.params.total_num_steps"),r8=a(` are calculated by `),zi=o("a"),p8=a("Trainer"),i8=a(" during "),od=o("code"),u8=a("train"),c8=a(", but you can of course do the math yourself."),ky=u(),Di=o("a"),Py=u(),Ss=o("h3"),ta=o("a"),ld=o("span"),f(Fo.$$.fragment),h8=u(),rd=o("span"),f8=a("ZeRO"),zy=u(),Vo=o("p"),Yo=o("a"),d8=a("Zero Redundancy Optimizer (ZeRO)"),m8=a(` is the workhorse of DeepSpeed. It supports 3 different levels (stages) of optimization. The first one is not quite interesting for scalability purposes, therefore this document focuses on stages 2 and 3. Stage 3 is further improved by the latest addition of ZeRO-Infinity. You will find more indepth information in the DeepSpeed documentation.`),Dy=u(),Ze=o("p"),_8=a("The "),pd=o("code"),v8=a("zero_optimization"),j8=a(" section of the configuration file is the most important part ("),Ko=o("a"),w8=a("docs"),y8=a(`), since that is where you define which ZeRO stages you want to enable and how to configure them. 
You will find the explanation for each parameter in the DeepSpeed docs.`),Oy=u(),aa=o("p"),g8=a("This section has to be configured exclusively via DeepSpeed configuration - the "),Oi=o("a"),b8=a("Trainer"),q8=a(` provides no equivalent command line arguments.`),Ay=u(),Ai=o("p"),E8=a(`Note: currently DeepSpeed doesn\u2019t validate parameter names, so if you misspell any, it\u2019ll use the default setting for the parameter that got misspelled. You can watch the DeepSpeed engine start up log messages to see what values it is going to use.`),Ty=u(),Ti=o("a"),Sy=u(),Cs=o("h4"),na=o("a"),id=o("span"),f(Jo.$$.fragment),$8=u(),ud=o("span"),k8=a("ZeRO-2 Config"),Cy=u(),Si=o("p"),P8=a("The following is an example of configuration for ZeRO stage 2:"),xy=u(),f(Xo.$$.fragment),Ry=u(),Ci=o("p"),cd=o("strong"),z8=a("Performance tuning:"),Iy=u(),Ne=o("ul"),xs=o("li"),D8=a("enabling "),hd=o("code"),O8=a("offload_optimizer"),A8=a(" should reduce GPU RAM usage (it requires "),fd=o("code"),T8=a('"stage": 2'),S8=a(")"),C8=u(),G=o("li"),dd=o("code"),x8=a('"overlap_comm": true'),R8=a(" trades off increased GPU RAM usage to lower all-reduce latency. "),md=o("code"),I8=a("overlap_comm"),U8=a(` uses 4.5x the `),_d=o("code"),G8=a("allgather_bucket_size"),M8=a(" and "),vd=o("code"),L8=a("reduce_bucket_size"),Z8=a(` values. So if they are set to 5e8, this requires a 9GB footprint (`),jd=o("code"),N8=a("5e8 x 2Bytes x 2 x 4.5"),H8=a(`). Therefore, if you have a GPU with 8GB or less RAM, to avoid getting OOM-errors you will need to reduce those parameters to about `),wd=o("code"),B8=a("2e8"),W8=a(`, which would require 3.6GB. You will want to do the same on larger capacity GPU as well, if you\u2019re starting to hit OOM.`),F8=u(),yd=o("li"),V8=a(`when reducing these buffers you\u2019re trading communication speed to avail more GPU RAM. The smaller the buffer size is, the slower the communication gets, and the more GPU RAM will be available to other tasks. So if a bigger batch size is important, getting a slightly slower training time could be a good trade.`),Uy=u(),He=o("p"),Y8=a("Additionally, "),gd=o("code"),K8=a("deepspeed==0.4.4"),J8=a(" added a new option "),bd=o("code"),X8=a("round_robin_gradients"),Q8=a(" which you can enable with:"),Gy=u(),f(Qo.$$.fragment),My=u(),xi=o("p"),ek=a("This is a stage 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism)."),Ly=u(),Ri=o("a"),Zy=u(),Rs=o("h4"),oa=o("a"),qd=o("span"),f(el.$$.fragment),sk=u(),Ed=o("span"),tk=a("ZeRO-3 Config"),Ny=u(),Ii=o("p"),ak=a("The following is an example of configuration for ZeRO stage 3:"),Hy=u(),f(sl.$$.fragment),By=u(),V=o("p"),nk=a(`If you are getting OOMs, because your model or activations don\u2019t fit into the GPU memory and you have unutilized CPU memory offloading the optimizer states and parameters to CPU memory with `),$d=o("code"),ok=a('"device": "cpu"'),lk=a(` may solve this limitation. If you don\u2019t want to offload to CPU memory, use `),kd=o("code"),rk=a("none"),pk=a(" instead of "),Pd=o("code"),ik=a("cpu"),uk=a(" for the "),zd=o("code"),ck=a("device"),hk=a(` entry. Offloading to NVMe is discussed further down.`),Wy=u(),Be=o("p"),fk=a("Pinned memory is enabled with "),Dd=o("code"),dk=a("pin_memory"),mk=a(" set to "),Od=o("code"),_k=a("true"),vk=a(`. 
This feature can improve the throughput at the cost of making less memory available to other processes. Pinned memory is set aside to the specific process that requested it and its typically accessed much faster than normal CPU memory.`),Fy=u(),Ui=o("p"),Ad=o("strong"),jk=a("Performance tuning:"),Vy=u(),la=o("ul"),tl=o("li"),Td=o("code"),wk=a("stage3_max_live_parameters"),yk=a(": "),Sd=o("code"),gk=a("1e9"),bk=u(),al=o("li"),Cd=o("code"),qk=a("stage3_max_reuse_distance"),Ek=a(": "),xd=o("code"),$k=a("1e9"),Yy=u(),M=o("p"),kk=a("If hitting OOM reduce "),Rd=o("code"),Pk=a("stage3_max_live_parameters"),zk=a(" and "),Id=o("code"),Dk=a("stage3_max_reuse_distance"),Ok=a(`. They should have minimal impact on performance unless you are doing activation checkpointing. `),Ud=o("code"),Ak=a("1e9"),Tk=a(` would consume ~2GB. The memory is shared by `),Gd=o("code"),Sk=a("stage3_max_live_parameters"),Ck=a(" and "),Md=o("code"),xk=a("stage3_max_reuse_distance"),Rk=a(", so it\u2019s not additive, it\u2019s just 2GB total."),Ky=u(),ze=o("p"),Ld=o("code"),Ik=a("stage3_max_live_parameters"),Uk=a(` is the upper limit on how many full parameters you want to keep on the GPU at any given time. \u201Creuse distance\u201D is a metric we are using to figure out when will a parameter be used again in the future, and we use the `),Zd=o("code"),Gk=a("stage3_max_reuse_distance"),Mk=a(` to decide whether to throw away the parameter or to keep it. If a parameter is going to be used again in near future (less than `),Nd=o("code"),Lk=a("stage3_max_reuse_distance"),Zk=a(`) then we keep it to reduce communication overhead. This is super helpful when you have activation checkpointing enabled, where we do a forward recompute and backward passes a a single layer granularity and want to keep the parameter in the forward recompute till the backward`),Jy=u(),Gi=o("p"),Nk=a("The following configuration values depend on the model\u2019s hidden size:"),Xy=u(),We=o("ul"),nl=o("li"),Hd=o("code"),Hk=a("reduce_bucket_size"),Bk=a(": "),Bd=o("code"),Wk=a("hidden_size*hidden_size"),Fk=u(),ol=o("li"),Wd=o("code"),Vk=a("stage3_prefetch_bucket_size"),Yk=a(": "),Fd=o("code"),Kk=a("0.9 * hidden_size * hidden_size"),Jk=u(),ll=o("li"),Vd=o("code"),Xk=a("stage3_param_persistence_threshold"),Qk=a(": "),Yd=o("code"),e7=a("10 * hidden_size"),Qy=u(),Fe=o("p"),s7=a("therefore set these values to "),Kd=o("code"),t7=a("auto"),a7=a(" and the "),Mi=o("a"),n7=a("Trainer"),o7=a(` will automatically assign the recommended values. But, of course, feel free to set these explicitly as well.`),eg=u(),rl=o("p"),Jd=o("code"),l7=a("stage3_gather_16bit_weights_on_model_save"),r7=a(` enables model fp16 weights consolidation when model gets saved. With large models and multiple GPUs this is an expensive operation both in terms of memory and speed. It\u2019s currently required if you plan to resume the training. Watch out for future updates that will remove this limitation and make things more flexible.`),sg=u(),le=o("p"),p7=a("If you\u2019re migrating from ZeRO-2 configuration note that "),Xd=o("code"),i7=a("allgather_partitions"),u7=a(", "),Qd=o("code"),c7=a("allgather_bucket_size"),h7=a(` and `),em=o("code"),f7=a("reduce_scatter"),d7=a(` configuration parameters are not used in ZeRO-3. 
If you keep these in the config file they will just be ignored.`),tg=u(),Li=o("ul"),pl=o("li"),sm=o("code"),m7=a("sub_group_size"),_7=a(": "),tm=o("code"),v7=a("1e9"),ag=u(),De=o("p"),am=o("code"),j7=a("sub_group_size"),w7=a(` controls the granularity in which parameters are updated during optimizer steps. Parameters are grouped into buckets of `),nm=o("code"),y7=a("sub_group_size"),g7=a(` and each buckets is updated one at a time. When used with NVMe offload in ZeRO-Infinity, `),om=o("code"),b7=a("sub_group_size"),q7=a(` therefore controls the granularity in which model states are moved in and out of CPU memory from NVMe during the optimizer step. This prevents running out of CPU memory for extremely large models.`),ng=u(),Ve=o("p"),E7=a("You can leave "),lm=o("code"),$7=a("sub_group_size"),k7=a(" to its default value of "),rm=o("em"),P7=a("1e9"),z7=a(` when not using NVMe offload. You may want to change its default value in the following cases:`),og=u(),ra=o("ol"),il=o("li"),D7=a("Running into OOM during optimizer step: Reduce "),pm=o("code"),O7=a("sub_group_size"),A7=a(" to reduce memory utilization of temporary buffers"),T7=u(),ul=o("li"),S7=a("Optimizer Step is taking a long time: Increase "),im=o("code"),C7=a("sub_group_size"),x7=a(` to improve bandwidth utilization as a result of the increased data buffers.`),lg=u(),Zi=o("a"),rg=u(),Is=o("h3"),pa=o("a"),um=o("span"),f(cl.$$.fragment),R7=u(),cm=o("span"),I7=a("NVMe Support"),pg=u(),Ni=o("p"),U7=a(`ZeRO-Infinity allows for training incredibly large models by extending GPU and CPU memory with NVMe memory. Thanks to smart partitioning and tiling algorithms each GPU needs to send and receive very small amounts of data during offloading so modern NVMe proved to be fit to allow for an even larger total memory pool available to your training process. ZeRO-Infinity requires ZeRO-3 enabled.`),ig=u(),Hi=o("p"),G7=a("The following configuration example enables NVMe to offload both optimizer states and the params:"),ug=u(),f(hl.$$.fragment),cg=u(),ia=o("p"),M7=a(`You can choose to offload both optimizer states and params to NVMe, or just one of them or none. For example, if you have copious amounts of CPU memory available, by all means offload to CPU memory only as it\u2019d be faster (hint: `),hm=o("em"),L7=a("\u201Cdevice\u201D: \u201Ccpu\u201D"),Z7=a(")."),hg=u(),Ye=o("p"),N7=a("Here is the full documentation for offloading "),fl=o("a"),H7=a("optimizer states"),B7=a(" and "),dl=o("a"),W7=a("parameters"),F7=a("."),fg=u(),ua=o("p"),V7=a("Make sure that your "),fm=o("code"),Y7=a("nvme_path"),K7=a(` is actually an NVMe, since it will work with the normal hard drive or SSD, but it\u2019ll be much much slower. The fast scalable training was designed with modern NVMe transfer speeds in mind (as of this writing one can have ~3.5GB/s read, ~3GB/s write peak speeds).`),dg=u(),Ke=o("p"),J7=a("In order to figure out the optimal "),dm=o("code"),X7=a("aio"),Q7=a(` configuration block you must run a benchmark on your target setup, as `),ml=o("a"),eP=a("explained here"),sP=a("."),mg=u(),Bi=o("a"),_g=u(),Us=o("h4"),ca=o("a"),mm=o("span"),f(_l.$$.fragment),tP=u(),_m=o("span"),aP=a("ZeRO-2 vs ZeRO-3 Performance"),vg=u(),Wi=o("p"),nP=a(`ZeRO-3 is likely to be slower than ZeRO-2 if everything else is configured the same because the former has to gather model weights in addition to what ZeRO-2 does. If ZeRO-2 meets your needs and you don\u2019t need to scale beyond a few GPUs then you may choose to stick to it. 
It\u2019s important to understand that ZeRO-3 enables a much higher scalability capacity at a cost of speed.`),jg=u(),Fi=o("p"),oP=a("It\u2019s possible to adjust ZeRO-3 configuration to make it perform closer to ZeRO-2:"),wg=u(),ha=o("ul"),Gs=o("li"),lP=a("set "),vm=o("code"),rP=a("stage3_param_persistence_threshold"),pP=a(" to a very large number - larger than the largest parameter, e.g., "),jm=o("code"),iP=a("6 * hidden_size * hidden_size"),uP=a(". This will keep the parameters on the GPUs."),cP=u(),vl=o("li"),hP=a("turn off "),wm=o("code"),fP=a("offload_params"),dP=a(" since ZeRO-2 doesn\u2019t have that option."),yg=u(),Je=o("p"),mP=a("The performance will likely improve significantly with just "),ym=o("code"),_P=a("offload_params"),vP=a(` turned off, even if you don\u2019t change `),gm=o("code"),jP=a("stage3_param_persistence_threshold"),wP=a(`. Of course, these changes will impact the size of the model you can train. So these help you to trade scalability for speed depending on your needs.`),gg=u(),Vi=o("a"),bg=u(),Ms=o("h4"),fa=o("a"),bm=o("span"),f(jl.$$.fragment),yP=u(),qm=o("span"),gP=a("ZeRO-2 Example"),qg=u(),da=o("p"),bP=a("Here is a full ZeRO-2 auto-configuration file "),Em=o("code"),qP=a("ds_config_zero2.json"),EP=a(":"),Eg=u(),f(wl.$$.fragment),$g=u(),ma=o("p"),$P=a(`Here is a full ZeRO-2 all-enabled manually set configuration file. It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple `),$m=o("code"),kP=a("auto"),PP=a(" settings in it."),kg=u(),f(yl.$$.fragment),Pg=u(),Yi=o("a"),zg=u(),Ls=o("h4"),_a=o("a"),km=o("span"),f(gl.$$.fragment),zP=u(),Pm=o("span"),DP=a("ZeRO-3 Example"),Dg=u(),va=o("p"),OP=a("Here is a full ZeRO-3 auto-configuration file "),zm=o("code"),AP=a("ds_config_zero3.json"),TP=a(":"),Og=u(),f(bl.$$.fragment),Ag=u(),ja=o("p"),SP=a(`Here is a full ZeRO-3 all-enabled manually set configuration file. It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple `),Dm=o("code"),CP=a("auto"),xP=a(" settings in it."),Tg=u(),f(ql.$$.fragment),Sg=u(),Zs=o("h3"),wa=o("a"),Om=o("span"),f(El.$$.fragment),RP=u(),Am=o("span"),IP=a("Optimizer and Scheduler"),Cg=u(),ya=o("p"),UP=a("As long as you don\u2019t enable "),Tm=o("code"),GP=a("offload_optimizer"),MP=a(` you can mix and match DeepSpeed and HuggingFace schedulers and optimizers, with the exception of using the combination of HuggingFace scheduler and DeepSpeed optimizer:`),xg=u(),Ki=o("p"),LP=a(`| Combos | HF Scheduler | DS Scheduler | | HF Optimizer | Yes | Yes | | DS Optimizer | No | Yes |`),Rg=u(),ga=o("p"),ZP=a("It is possible to use a non-DeepSpeed optimizer when "),Sm=o("code"),NP=a("offload_optimizer"),HP=a(` is enabled, as long as it has both CPU and GPU implementation (except LAMB).`),Ig=u(),Ji=o("a"),Ug=u(),Ns=o("h4"),ba=o("a"),Cm=o("span"),f($l.$$.fragment),BP=u(),xm=o("span"),WP=a("Optimizer"),Gg=u(),Xe=o("p"),FP=a(`DeepSpeed\u2019s main optimizers are Adam, AdamW, OneBitAdam, and Lamb. These have been thoroughly tested with ZeRO and are thus recommended to be used. It, however, can import other optimizers from `),Rm=o("code"),VP=a("torch"),YP=a(". 
The full documentation is "),kl=o("a"),KP=a("here"),JP=a("."),Mg=u(),$=o("p"),XP=a("If you don\u2019t configure the "),Im=o("code"),QP=a("optimizer"),ez=a(" entry in the configuration file, the "),Xi=o("a"),sz=a("Trainer"),tz=a(` will automatically set it to `),Um=o("code"),az=a("AdamW"),nz=a(` and will use the supplied values or the defaults for the following command line arguments: `),Gm=o("code"),oz=a("--learning_rate"),lz=a(", "),Mm=o("code"),rz=a("--adam_beta1"),pz=a(", "),Lm=o("code"),iz=a("--adam_beta2"),uz=a(", "),Zm=o("code"),cz=a("--adam_epsilon"),hz=a(" and "),Nm=o("code"),fz=a("--weight_decay"),dz=a("."),Lg=u(),Qe=o("p"),mz=a("Here is an example of the auto-configured "),Hm=o("code"),_z=a("optimizer"),vz=a(" entry for "),Bm=o("code"),jz=a("AdamW"),wz=a(":"),Zg=u(),f(Pl.$$.fragment),Ng=u(),Qi=o("p"),yz=a(`Note that the command line arguments will set the values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when for example, the learning rate is set to different values in different places. Command line rules. The values that get overridden are:`),Hg=u(),re=o("ul"),zl=o("li"),Wm=o("code"),gz=a("lr"),bz=a(" with the value of "),Fm=o("code"),qz=a("--learning_rate"),Ez=u(),Dl=o("li"),Vm=o("code"),$z=a("betas"),kz=a(" with the value of "),Ym=o("code"),Pz=a("--adam_beta1 --adam_beta2"),zz=u(),Ol=o("li"),Km=o("code"),Dz=a("eps"),Oz=a(" with the value of "),Jm=o("code"),Az=a("--adam_epsilon"),Tz=u(),Al=o("li"),Xm=o("code"),Sz=a("weight_decay"),Cz=a(" with the value of "),Qm=o("code"),xz=a("--weight_decay"),Bg=u(),eu=o("p"),Rz=a("Therefore please remember to tune the shared hyperparameters on the command line."),Wg=u(),su=o("p"),Iz=a("You can also set the values explicitly:"),Fg=u(),f(Tl.$$.fragment),Vg=u(),qa=o("p"),Uz=a("But then you\u2019re on your own synchronizing the "),tu=o("a"),Gz=a("Trainer"),Mz=a(` command line arguments and the DeepSpeed configuration.`),Yg=u(),au=o("p"),Lz=a("If you want to use another optimizer which is not listed above, you will have to add to the top level configuration."),Kg=u(),f(Sl.$$.fragment),Jg=u(),pe=o("p"),Zz=a("Similarly to "),e_=o("code"),Nz=a("AdamW"),Hz=a(`, you can configure other officially supported optimizers. Just remember that may have different config values. e.g. for Adam you will want `),s_=o("code"),Bz=a("weight_decay"),Wz=a(" around "),t_=o("code"),Fz=a("0.01"),Vz=a("."),Xg=u(),nu=o("a"),Qg=u(),Hs=o("h4"),Ea=o("a"),a_=o("span"),f(Cl.$$.fragment),Yz=u(),n_=o("span"),Kz=a("Scheduler"),e2=u(),L=o("p"),Jz=a("DeepSpeed supports "),o_=o("code"),Xz=a("LRRangeTest"),Qz=a(", "),l_=o("code"),eD=a("OneCycle"),sD=a(", "),r_=o("code"),tD=a("WarmupLR"),aD=a(" and "),p_=o("code"),nD=a("WarmupDecayLR"),oD=a(` learning rate schedulers. The full documentation is `),xl=o("a"),lD=a("here"),rD=a("."),s2=u(),ou=o("p"),pD=a("Here is where the schedulers overlap between \u{1F917} Transformers and DeepSpeed:"),t2=u(),$a=o("ul"),Rl=o("li"),i_=o("code"),iD=a("WarmupLR"),uD=a(" via "),u_=o("code"),cD=a("--lr_scheduler_type constant_with_warmup"),hD=u(),es=o("li"),c_=o("code"),fD=a("WarmupDecayLR"),dD=a(" via "),h_=o("code"),mD=a("--lr_scheduler_type linear"),_D=a(". 
This is also the default value for "),f_=o("code"),vD=a("--lr_scheduler_type"),jD=a(`, therefore, if you don\u2019t configure the scheduler this is scheduler that will get configured by default.`),a2=u(),D=o("p"),wD=a("If you don\u2019t configure the "),d_=o("code"),yD=a("scheduler"),gD=a(" entry in the configuration file, the "),lu=o("a"),bD=a("Trainer"),qD=a(` will use the values of `),m_=o("code"),ED=a("--lr_scheduler_type"),$D=a(", "),__=o("code"),kD=a("--learning_rate"),PD=a(" and "),v_=o("code"),zD=a("--warmup_steps"),DD=a(" or "),j_=o("code"),OD=a("--warmup_ratio"),AD=a(` to configure a \u{1F917} Transformers version of it.`),n2=u(),ss=o("p"),TD=a("Here is an example of the auto-configured "),w_=o("code"),SD=a("scheduler"),CD=a(" entry for "),y_=o("code"),xD=a("WarmupLR"),RD=a(":"),o2=u(),f(Il.$$.fragment),l2=u(),ts=o("p"),ID=a("Since "),g_=o("em"),UD=a("\u201Cauto\u201D"),GD=a(" is used the "),ru=o("a"),MD=a("Trainer"),LD=a(` arguments will set the correct values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when, for example, the learning rate is set to different values in different places. Command line rules. The values that get set are:`),r2=u(),ie=o("ul"),ka=o("li"),b_=o("code"),ZD=a("warmup_min_lr"),ND=a(" with the value of "),q_=o("code"),HD=a("0"),BD=a("."),WD=u(),Pa=o("li"),E_=o("code"),FD=a("warmup_max_lr"),VD=a(" with the value of "),$_=o("code"),YD=a("--learning_rate"),KD=a("."),JD=u(),as=o("li"),k_=o("code"),XD=a("warmup_num_steps"),QD=a(" with the value of "),P_=o("code"),eO=a("--warmup_steps"),sO=a(" if provided. Otherwise will use "),z_=o("code"),tO=a("--warmup_ratio"),aO=a(` multiplied by the number of training steps and rounded up.`),nO=u(),ns=o("li"),D_=o("code"),oO=a("total_num_steps"),lO=a(" with either the value of "),O_=o("code"),rO=a("--max_steps"),pO=a(` or if it is not provided, derived automatically at run time based on the environment and the size of the dataset and other command line arguments (needed for `),A_=o("code"),iO=a("WarmupDecayLR"),uO=a(")."),p2=u(),pu=o("p"),cO=a("You can, of course, take over any or all of the configuration values and set those yourself:"),i2=u(),f(Ul.$$.fragment),u2=u(),za=o("p"),hO=a("But then you\u2019re on your own synchronizing the "),iu=o("a"),fO=a("Trainer"),dO=a(` command line arguments and the DeepSpeed configuration.`),c2=u(),Da=o("p"),mO=a("For example, for "),T_=o("code"),_O=a("WarmupDecayLR"),vO=a(", you can use the following entry:"),h2=u(),f(Gl.$$.fragment),f2=u(),Y=o("p"),jO=a("and "),S_=o("code"),wO=a("total_num_steps"),yO=a(", "),C_=o("code"),gO=a("warmup_max_lr"),bO=a(", "),x_=o("code"),qO=a("warmup_num_steps"),EO=a(" and "),R_=o("code"),$O=a("total_num_steps"),kO=a(" will be set at loading time."),d2=u(),uu=o("a"),m2=u(),Bs=o("h3"),Oa=o("a"),I_=o("span"),f(Ml.$$.fragment),PO=u(),U_=o("span"),zO=a("fp32 Precision"),_2=u(),cu=o("p"),DO=a("Deepspeed supports the full fp32 and the fp16 mixed precision."),v2=u(),Aa=o("p"),OO=a(`Because of the much reduced memory needs and faster speed one gets with the fp16 mixed precision, the only time you will want to not use it is when the model you\u2019re using doesn\u2019t behave well under this training mode. Typically this happens when the model wasn\u2019t pretrained in the fp16 mixed precision (e.g. often this happens with bf16-pretrained models). Such models may overflow or underflow leading to `),G_=o("code"),AO=a("NaN"),TO=a(` loss. 
If this is your case then you will want to use the full fp32 mode, by explicitly disabling the otherwise default fp16 mixed precision mode with:`),j2=u(),f(Ll.$$.fragment),w2=u(),Ta=o("p"),SO=a(`If you\u2019re using the Ampere-architecture based GPU, pytorch version 1.7 and higher will automatically switch to using the much more efficient tf32 format for some operations, but the results will still be in fp32. For details and benchmarks, please, see `),Zl=o("a"),CO=a("TensorFloat-32(TF32) on Ampere devices"),xO=a(`. The document includes instructions on how to disable this automatic conversion if for some reason you prefer not to use it.`),y2=u(),ue=o("p"),RO=a("With the \u{1F917} Trainer you can use "),M_=o("code"),IO=a("--tf32"),UO=a(" to enable it, or disable it with "),L_=o("code"),GO=a("--tf32 0"),MO=a(" or "),Z_=o("code"),LO=a("--no_tf32"),ZO=a(". By default the PyTorch default is used."),g2=u(),hu=o("a"),b2=u(),Ws=o("h3"),Sa=o("a"),N_=o("span"),f(Nl.$$.fragment),NO=u(),H_=o("span"),HO=a("Automatic Mixed Precision"),q2=u(),fu=o("p"),BO=a("You can use automatic mixed precision with either a pytorch-like AMP way or the apex-like way:"),E2=u(),Fs=o("h3"),Ca=o("a"),B_=o("span"),f(Hl.$$.fragment),WO=u(),W_=o("span"),FO=a("fp16"),$2=u(),du=o("p"),VO=a("To configure pytorch AMP-like mode with fp16 (float16) set:"),k2=u(),f(Bl.$$.fragment),P2=u(),os=o("p"),YO=a("and the "),mu=o("a"),KO=a("Trainer"),JO=a(` will automatically enable or disable it based on the value of `),F_=o("code"),XO=a("args.fp16_backend"),QO=a(". The rest of config values are up to you."),z2=u(),ls=o("p"),eA=a("This mode gets enabled when "),V_=o("code"),sA=a("--fp16 --fp16_backend amp"),tA=a(" or "),Y_=o("code"),aA=a("--fp16_full_eval"),nA=a(" command line args are passed."),D2=u(),_u=o("p"),oA=a("You can also enable/disable this mode explicitly:"),O2=u(),f(Wl.$$.fragment),A2=u(),xa=o("p"),lA=a("But then you\u2019re on your own synchronizing the "),vu=o("a"),rA=a("Trainer"),pA=a(` command line arguments and the DeepSpeed configuration.`),T2=u(),Ra=o("p"),iA=a("Here is the "),Fl=o("a"),uA=a("documentation"),cA=a("."),S2=u(),Vs=o("h3"),Ia=o("a"),K_=o("span"),f(Vl.$$.fragment),hA=u(),J_=o("span"),fA=a("bf16"),C2=u(),ju=o("p"),dA=a("If bf16 (bfloat16) is desired instead of fp16 then the following configuration section is to be used:"),x2=u(),f(Yl.$$.fragment),R2=u(),wu=o("p"),mA=a("bf16 has the same dynamic range as fp32 and thus doesn\u2019t require loss scaling."),I2=u(),rs=o("p"),_A=a("This mode gets enabled when "),X_=o("code"),vA=a("--bf16"),jA=a(" or "),Q_=o("code"),wA=a("--bf16_full_eval"),yA=a(" command line args are passed."),U2=u(),yu=o("p"),gA=a("You can also enable/disable this mode explicitly:"),G2=u(),f(Kl.$$.fragment),M2=u(),f(Ua.$$.fragment),L2=u(),Ys=o("h3"),Ga=o("a"),e1=o("span"),f(Jl.$$.fragment),bA=u(),s1=o("span"),qA=a("apex"),Z2=u(),gu=o("p"),EA=a("To configure apex AMP-like mode set:"),N2=u(),f(Xl.$$.fragment),H2=u(),ce=o("p"),$A=a("and the "),bu=o("a"),kA=a("Trainer"),PA=a(" will automatically configure it based on the values of "),t1=o("code"),zA=a("args.fp16_backend"),DA=a(` and `),a1=o("code"),OA=a("args.fp16_opt_level"),AA=a("."),B2=u(),Ma=o("p"),TA=a("This mode gets enabled when "),n1=o("code"),SA=a("--fp16 --fp16_backend apex --fp16_opt_level 01"),CA=a(" command line args are passed."),W2=u(),qu=o("p"),xA=a("You can also configure this mode explicitly:"),F2=u(),f(Ql.$$.fragment),V2=u(),La=o("p"),RA=a("But then you\u2019re on your own synchronizing the "),Eu=o("a"),IA=a("Trainer"),UA=a(` command 
line arguments and the DeepSpeed configuration.`),Y2=u(),Za=o("p"),GA=a("Here is the "),er=o("a"),MA=a("documentation"),LA=a("."),K2=u(),$u=o("a"),J2=u(),Ks=o("h3"),Na=o("a"),o1=o("span"),f(sr.$$.fragment),ZA=u(),l1=o("span"),NA=a("Batch Size"),X2=u(),ku=o("p"),HA=a("To configure batch size, use:"),Q2=u(),f(tr.$$.fragment),eb=u(),Z=o("p"),BA=a("and the "),Pu=o("a"),WA=a("Trainer"),FA=a(" will automatically set "),r1=o("code"),VA=a("train_micro_batch_size_per_gpu"),YA=a(` to the value of `),p1=o("code"),KA=a("args.per_device_train_batch_size"),JA=a(" and "),i1=o("code"),XA=a("train_batch_size"),QA=a(" to "),u1=o("code"),eT=a("args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps"),sT=a("."),sb=u(),zu=o("p"),tT=a("You can also set the values explicitly:"),tb=u(),f(ar.$$.fragment),ab=u(),Ha=o("p"),aT=a("But then you\u2019re on your own synchronizing the "),Du=o("a"),nT=a("Trainer"),oT=a(` command line arguments and the DeepSpeed configuration.`),nb=u(),Ou=o("a"),ob=u(),Js=o("h3"),Ba=o("a"),c1=o("span"),f(nr.$$.fragment),lT=u(),h1=o("span"),rT=a("Gradient Accumulation"),lb=u(),Au=o("p"),pT=a("To configure gradient accumulation set:"),rb=u(),f(or.$$.fragment),pb=u(),ps=o("p"),iT=a("and the "),Tu=o("a"),uT=a("Trainer"),cT=a(" will automatically set it to the value of "),f1=o("code"),hT=a("args.gradient_accumulation_steps"),fT=a("."),ib=u(),Su=o("p"),dT=a("You can also set the value explicitly:"),ub=u(),f(lr.$$.fragment),cb=u(),Wa=o("p"),mT=a("But then you\u2019re on your own synchronizing the "),Cu=o("a"),_T=a("Trainer"),vT=a(` command line arguments and the DeepSpeed configuration.`),hb=u(),xu=o("a"),fb=u(),Xs=o("h3"),Fa=o("a"),d1=o("span"),f(rr.$$.fragment),jT=u(),m1=o("span"),wT=a("Gradient Clipping"),db=u(),Ru=o("p"),yT=a("To configure gradient gradient clipping set:"),mb=u(),f(pr.$$.fragment),_b=u(),is=o("p"),gT=a("and the "),Iu=o("a"),bT=a("Trainer"),qT=a(" will automatically set it to the value of "),_1=o("code"),ET=a("args.max_grad_norm"),$T=a("."),vb=u(),Uu=o("p"),kT=a("You can also set the value explicitly:"),jb=u(),f(ir.$$.fragment),wb=u(),Va=o("p"),PT=a("But then you\u2019re on your own synchronizing the "),Gu=o("a"),zT=a("Trainer"),DT=a(` command line arguments and the DeepSpeed configuration.`),yb=u(),Mu=o("a"),gb=u(),Qs=o("h3"),Ya=o("a"),v1=o("span"),f(ur.$$.fragment),OT=u(),j1=o("span"),AT=a("Getting The Model Weights Out"),bb=u(),Ka=o("p"),TT=a(`As long as you continue training and resuming using DeepSpeed you don\u2019t need to worry about anything. DeepSpeed stores fp32 master weights in its custom checkpoint optimizer files, which are `),w1=o("code"),ST=a("global_step*/*optim_states.pt"),CT=a(` (this is glob pattern), and are saved under the normal checkpoint.`),qb=u(),Lu=o("p"),y1=o("strong"),xT=a("FP16 Weights:"),Eb=u(),Ja=o("p"),RT=a("When a model is saved under ZeRO-2, you end up having the normal "),g1=o("code"),IT=a("pytorch_model.bin"),UT=a(` file with the model weights, but they are only the fp16 version of the weights.`),$b=u(),O=o("p"),GT=a(`Under ZeRO-3, things are much more complicated, since the model weights are partitioned out over multiple GPUs, therefore `),b1=o("code"),MT=a('"stage3_gather_16bit_weights_on_model_save": true'),LT=a(" is required to get the "),q1=o("code"),ZT=a("Trainer"),NT=a(` to save the fp16 version of the weights. If this setting is `),E1=o("code"),HT=a("False"),BT=u(),$1=o("code"),WT=a("pytorch_model.bin"),FT=a(" won\u2019t be created. 
This is because by default DeepSpeed\u2019s "),k1=o("code"),VT=a("state_dict"),YT=a(" contains a placeholder and not the real weights. If we were to save this "),P1=o("code"),KT=a("state_dict"),JT=a(" it won\u2019t be possible to load it back."),kb=u(),f(cr.$$.fragment),Pb=u(),Zu=o("p"),z1=o("strong"),XT=a("FP32 Weights:"),zb=u(),Xa=o("p"),QT=a(`While the fp16 weights are fine for resuming training, if you finished finetuning your model and want to upload it to the `),hr=o("a"),eS=a("models hub"),sS=a(` or pass it to someone else you most likely will want to get the fp32 weights. This ideally shouldn\u2019t be done during training since this is a process that requires a lot of memory, and therefore best to be performed offline after the training is complete. But if desired and you have plenty of free CPU memory it can be done in the same training script. The following sections will discuss both approaches.`),Db=u(),Nu=o("p"),D1=o("strong"),tS=a("Live FP32 Weights Recovery:"),Ob=u(),Hu=o("p"),aS=a("This approach may not work if you model is large and you have little free CPU memory left, at the end of the training."),Ab=u(),Bu=o("p"),nS=a("If you have saved at least one checkpoint, and you want to use the latest one, you can do the following:"),Tb=u(),f(fr.$$.fragment),Sb=u(),us=o("p"),oS=a("If you\u2019re using the "),O1=o("code"),lS=a("--load_best_model_at_end"),rS=a(" class:"),A1=o("em"),pS=a("~transformers.TrainingArguments"),iS=a(` argument (to track the best checkpoint), then you can finish the training by first saving the final model explicitly and then do the same as above:`),Cb=u(),f(dr.$$.fragment),xb=u(),f(Qa.$$.fragment),Rb=u(),en=o("p"),uS=a("Of course, you don\u2019t have to use class:"),T1=o("em"),cS=a("~transformers.Trainer"),hS=a(` and you can adjust the examples above to your own trainer.`),Ib=u(),sn=o("p"),fS=a("If for some reason you want more refinement, you can also extract the fp32 "),S1=o("code"),dS=a("state_dict"),mS=a(` of the weights and apply these yourself as is shown in the following example:`),Ub=u(),f(mr.$$.fragment),Gb=u(),Wu=o("p"),C1=o("strong"),_S=a("Offline FP32 Weights Recovery:"),Mb=u(),cs=o("p"),vS=a("DeepSpeed creates a special conversion script "),x1=o("code"),jS=a("zero_to_fp32.py"),wS=a(` which it places in the top-level of the checkpoint folder. Using this script you can extract the weights at any point. The script is standalone and you no longer need to have the configuration file or a `),R1=o("code"),yS=a("Trainer"),gS=a(" to do the extraction."),Lb=u(),Fu=o("p"),bS=a("Let\u2019s say your checkpoint folder looks like this:"),Zb=u(),f(_r.$$.fragment),Nb=u(),tn=o("p"),qS=a("In this example there is just one DeepSpeed checkpoint sub-folder "),I1=o("em"),ES=a("global_step1"),$S=a(`. Therefore to reconstruct the fp32 weights just run:`),Hb=u(),f(vr.$$.fragment),Bb=u(),an=o("p"),kS=a("This is it. 
"),U1=o("code"),PS=a("pytorch_model.bin"),zS=a(" will now contain the full fp32 model weights consolidated from multiple GPUs."),Wb=u(),Vu=o("p"),DS=a("The script will automatically be able to handle either a ZeRO-2 or ZeRO-3 checkpoint."),Fb=u(),jr=o("p"),G1=o("code"),OS=a("python zero_to_fp32.py -h"),AS=a(" will give you usage details."),Vb=u(),hs=o("p"),TS=a("The script will auto-discover the deepspeed sub-folder using the contents of the file "),M1=o("code"),SS=a("latest"),CS=a(`, which in the current example will contain `),L1=o("code"),xS=a("global_step1"),RS=a("."),Yb=u(),Yu=o("p"),IS=a("Note: currently the script requires 2x general RAM of the final fp32 model weights."),Kb=u(),et=o("h3"),nn=o("a"),Z1=o("span"),f(wr.$$.fragment),US=u(),N1=o("span"),GS=a("ZeRO-3 and Infinity Nuances"),Jb=u(),Ku=o("p"),MS=a("ZeRO-3 is quite different from ZeRO-2 because of its param sharding feature."),Xb=u(),Ju=o("p"),LS=a("ZeRO-Infinity further extends ZeRO-3 to support NVMe memory and multiple other speed and scalability improvements."),Qb=u(),Xu=o("p"),ZS=a(`While all the efforts were made for things to just work without needing any special changes to your models, in certain circumstances you may find the following information to be needed.`),e3=u(),st=o("h4"),on=o("a"),H1=o("span"),f(yr.$$.fragment),NS=u(),B1=o("span"),HS=a("Constructing Massive Models"),s3=u(),ln=o("p"),BS=a(`DeepSpeed/ZeRO-3 can handle models with Trillions of parameters which may not fit onto the existing RAM. In such cases, but also if you want the initialization to happen much faster, initialize the model using `),W1=o("em"),WS=a("deepspeed.zero.Init()"),FS=a(` context manager (which is also a function decorator), like so:`),t3=u(),f(gr.$$.fragment),a3=u(),Qu=o("p"),VS=a("As you can see this gives you a randomly initialized model."),n3=u(),P=o("p"),YS=a("If you want to use a pretrained model, "),F1=o("code"),KS=a("model_class.from_pretrained"),JS=a(` will activate this feature as long as `),V1=o("code"),XS=a("is_deepspeed_zero3_enabled()"),QS=a(" returns "),Y1=o("code"),eC=a("True"),sC=a(`, which currently is setup by the `),ec=o("a"),tC=a("TrainingArguments"),aC=a(` object if the passed DeepSpeed configuration file contains ZeRO-3 config section. Thus you must create the `),sc=o("a"),nC=a("TrainingArguments"),oC=a(" object "),K1=o("strong"),lC=a("before"),rC=a(` calling `),J1=o("code"),pC=a("from_pretrained"),iC=a(". Here is an example of a possible sequence:"),o3=u(),f(br.$$.fragment),l3=u(),rn=o("p"),uC=a("If you\u2019re using the official example scripts and your command line arguments include "),X1=o("code"),cC=a("--deepspeed ds_config.json"),hC=a(` with ZeRO-3 config enabled, then everything is already done for you, since this is how example scripts are written.`),r3=u(),tc=o("p"),fC=a("Note: If the fp16 weights of the model can\u2019t fit onto the memory of a single GPU this feature must be used."),p3=u(),pn=o("p"),dC=a("For full details on this method and other related features please refer to "),qr=o("a"),mC=a("Constructing Massive Models"),_C=a("."),i3=u(),he=o("p"),vC=a("Also when loading fp16-pretrained models, you will want to tell "),Q1=o("code"),jC=a("from_pretrained"),wC=a(` to use `),ev=o("code"),yC=a("torch_dtype=torch.float16"),gC=a(". 
For details, please, see "),ac=o("a"),bC=a("from_pretrained-torch-dtype"),qC=a("."),u3=u(),tt=o("h4"),un=o("a"),sv=o("span"),f(Er.$$.fragment),EC=u(),tv=o("span"),$C=a("Gathering Parameters"),c3=u(),$r=o("p"),kC=a(`Under ZeRO-3 on multiple GPUs no single GPU has all the parameters unless it\u2019s the parameters for the currently executing layer. So if you need to access all parameters from all layers at once there is a specific method to do it. Most likely you won\u2019t need it, but if you do please refer to `),kr=o("a"),PC=a("Gathering Parameters"),h3=u(),cn=o("p"),zC=a(`We do however use it internally in several places, one such example is when loading pretrained model weights in `),av=o("code"),DC=a("from_pretrained"),OC=a(`. We load one layer at a time and immediately partition it to all participating GPUs, as for very large models it won\u2019t be possible to load it on one GPU and then spread it out to multiple GPUs, due to memory limitations.`),f3=u(),nc=o("p"),AC=a("Also under ZeRO-3, if you write your own code and run into a model parameter weight that looks like:"),d3=u(),f(Pr.$$.fragment),m3=u(),fs=o("p"),TC=a("stress on "),nv=o("code"),SC=a("tensor([1.])"),CC=a(", or if you get an error where it says the parameter is of size "),ov=o("code"),xC=a("1"),RC=a(`, instead of some much larger multi-dimensional shape, this means that the parameter is partitioned and what you see is a ZeRO-3 placeholder.`),_3=u(),oc=o("a"),v3=u(),at=o("h3"),hn=o("a"),lv=o("span"),f(zr.$$.fragment),IC=u(),rv=o("span"),UC=a("ZeRO Inference"),j3=u(),lc=o("p"),GC=a(`ZeRO Inference uses the same config as ZeRO-3 Training. You just don\u2019t need the optimizer and scheduler sections. In fact you can leave these in the config file if you want to share the same one with the training. They will just be ignored.`),w3=u(),fn=o("p"),MC=a("Otherwise you just need to pass the usual "),rc=o("a"),LC=a("TrainingArguments"),ZC=a(" arguments. For example:"),y3=u(),f(Dr.$$.fragment),g3=u(),pc=o("p"),NC=a(`The only important thing is that you need to use a ZeRO-3 configuration, since ZeRO-2 provides no benefit whatsoever for the inference as only ZeRO-3 performs sharding of parameters, whereas ZeRO-1 shards gradients and optimizer states.`),b3=u(),dn=o("p"),HC=a("Here is an example of running "),pv=o("code"),BC=a("run_translation.py"),WC=a(" under DeepSpeed deploying all available GPUs:"),q3=u(),f(Or.$$.fragment),E3=u(),ic=o("p"),FC=a(`Since for inference there is no need for additional large memory used by the optimizer states and the gradients you should be able to fit much larger batches and/or sequence length onto the same hardware.`),$3=u(),uc=o("p"),VC=a(`Additionally DeepSpeed is currently developing a related product called Deepspeed-Inference which has no relationship to the ZeRO technology, but instead uses tensor parallelism to scale models that can\u2019t fit onto a single GPU. 
This is a work in progress and we will provide the integration once that product is complete.`),k3=u(),nt=o("h3"),mn=o("a"),iv=o("span"),f(Ar.$$.fragment),YC=u(),uv=o("span"),KC=a("Memory Requirements"),P3=u(),cc=o("p"),JC=a("Since Deepspeed ZeRO can offload memory to CPU (and NVMe) the framework provides utils that allow one to tell how much CPU and GPU memory will be needed depending on the number of GPUs being used."),z3=u(),hc=o("p"),XC=a("Let\u2019s estimate how much memory is needed to finetune \u201Cbigscience/T0_3B\u201D on a single GPU:"),D3=u(),f(Tr.$$.fragment),O3=u(),fc=o("p"),QC=a("So you can fit it on a single 80GB GPU and no CPU offload, or a tiny 8GB GPU but then need ~60GB of CPU memory. (Remember this is just the memory for params, optimizer states and gradients - you will need a bit more memory for cuda kernels, activations and temps.)"),A3=u(),dc=o("p"),ex=a("Then it\u2019s a tradeoff of cost vs speed. It\u2019ll be cheaper to buy/rent a smaller GPU (or less GPUs since you can use multiple GPUs with Deepspeed ZeRO. But then it\u2019ll be slower, so even if you don\u2019t care about how fast something will be done, the slowdown has a direct impact on the duration of using the GPU and thus bigger cost. So experiment and compare which works the best."),T3=u(),mc=o("p"),sx=a("If you have enough GPU memory make sure to disable the CPU/NVMe offload as it\u2019ll make everything faster."),S3=u(),_c=o("p"),tx=a("For example, let\u2019s repeat the same for 2 GPUs:"),C3=u(),f(Sr.$$.fragment),x3=u(),vc=o("p"),ax=a("So here you\u2019d want 2x 32GB GPUs or higher without offloading to CPU."),R3=u(),_n=o("p"),nx=a("For full information please see "),Cr=o("a"),ox=a("memory estimators"),lx=a("."),I3=u(),ot=o("h3"),vn=o("a"),cv=o("span"),f(xr.$$.fragment),rx=u(),hv=o("span"),px=a("Filing Issues"),U3=u(),jc=o("p"),ix=a("Here is how to file an issue so that we could quickly get to the bottom of the issue and help you to unblock your work."),G3=u(),wc=o("p"),ux=a("In your report please always include:"),M3=u(),N=o("ol"),fv=o("li"),dv=o("p"),cx=a("the full Deepspeed config file in the report"),hx=u(),mv=o("li"),Oe=o("p"),fx=a("either the command line arguments if you were using the "),yc=o("a"),dx=a("Trainer"),mx=a(` or `),gc=o("a"),_x=a("TrainingArguments"),vx=a(` arguments if you were scripting the Trainer setup yourself. Please do not dump the `),bc=o("a"),jx=a("TrainingArguments"),wx=a(" as it has dozens of entries that are irrelevant."),yx=u(),Rr=o("li"),_v=o("p"),gx=a("Output of:"),bx=u(),f(Ir.$$.fragment),qx=u(),vv=o("li"),Ur=o("p"),Ex=a(`If possible include a link to a Google Colab notebook that we can reproduce the problem with. You can use this `),Gr=o("a"),$x=a("notebook"),kx=a(` as a starting point.`),Px=u(),jv=o("li"),wv=o("p"),zx=a("Unless it\u2019s impossible please always use a standard dataset that we can use and not something custom."),Dx=u(),yv=o("li"),Mr=o("p"),Ox=a("If possible try to use one of the existing "),Lr=o("a"),Ax=a("examples"),Tx=a(" to reproduce the problem with."),L3=u(),qc=o("p"),Sx=a("Things to consider:"),Z3=u(),jn=o("ul"),lt=o("li"),gv=o("p"),Cx=a("Deepspeed is often not the cause of the problem."),xx=u(),bv=o("p"),Rx=a(`Some of the filed issues proved to be Deepspeed-unrelated. 
That is once Deepspeed was removed from the setup, the problem was still there.`),Ix=u(),qv=o("p"),Ux=a(`Therefore, if it\u2019s not absolutely obvious it\u2019s a DeepSpeed-related problem, as in you can see that there is an exception and you can see that DeepSpeed modules are involved, first re-test your setup without DeepSpeed in it. And only if the problem persists then do mentioned Deepspeed and supply all the required details.`),Gx=u(),Ev=o("li"),Zr=o("p"),Mx=a(`If it\u2019s clear to you that the issue is in the DeepSpeed core and not the integration part, please file the Issue directly with `),Nr=o("a"),Lx=a("Deepspeed"),Zx=a(`. If you aren\u2019t sure, please do not worry, either Issue tracker will do, we will figure it out once you posted it and redirect you to another Issue tracker if need be.`),N3=u(),rt=o("h3"),wn=o("a"),$v=o("span"),f(Hr.$$.fragment),Nx=u(),kv=o("span"),Hx=a("Troubleshooting"),H3=u(),pt=o("h4"),yn=o("a"),Pv=o("span"),f(Br.$$.fragment),Bx=u(),Wr=o("span"),Wx=a("the "),zv=o("code"),Fx=a("deepspeed"),Vx=a(" process gets killed at startup without a traceback"),B3=u(),H=o("p"),Yx=a("If the "),Dv=o("code"),Kx=a("deepspeed"),Jx=a(` process gets killed at launch time without a traceback, that usually means that the program tried to allocate more CPU memory than your system has or your process is allowed to allocate and the OS kernel killed that process. This is because your configuration file most likely has either `),Ov=o("code"),Xx=a("offload_optimizer"),Qx=a(" or "),Av=o("code"),eR=a("offload_param"),sR=a(` or both configured to offload to `),Tv=o("code"),tR=a("cpu"),aR=a(`. If you have NVMe, experiment with offloading to NVMe if you\u2019re running under ZeRO-3. Here is how you can `),Fr=o("a"),nR=a("estimate how much memory is needed for a specific model"),oR=a("."),W3=u(),it=o("h4"),gn=o("a"),Sv=o("span"),f(Vr.$$.fragment),lR=u(),Ec=o("span"),rR=a("training and/or eval/predict loss is "),Cv=o("code"),pR=a("NaN"),F3=u(),$c=o("p"),iR=a("This often happens when one takes a model pre-trained in bf16 mixed precision mode and tries to use it under fp16 (with or without mixed precision). Most models trained on TPU and often the ones released by Google are in this category (e.g. almost all t5-based models). Here the solution is to either use fp32 or bf16 if your hardware supports it (TPU, Ampere GPUs or newer)."),V3=u(),kc=o("p"),uR=a("The other problem may have to do with using fp16. When you configure this section:"),Y3=u(),f(Yr.$$.fragment),K3=u(),bn=o("p"),cR=a("and you see in your log that Deepspeed reports "),xv=o("code"),hR=a("OVERFLOW!"),fR=a(" as follows:"),J3=u(),f(Kr.$$.fragment),X3=u(),Pc=o("p"),dR=a("that means that the Deepspeed loss scaler can\u2019t figure out a scaling co-efficient that overcomes loss overflow."),Q3=u(),zc=o("p"),mR=a("(the log was massaged to be more readable here.)"),e0=u(),ds=o("p"),_R=a("In this case you usually need to raise the value of "),Rv=o("code"),vR=a("initial_scale_power"),jR=a(". 
Setting it to "),Iv=o("code"),wR=a('"initial_scale_power": 32'),yR=a(" will typically resolve the problem."),s0=u(),ut=o("h3"),qn=o("a"),Uv=o("span"),f(Jr.$$.fragment),gR=u(),Gv=o("span"),bR=a("Notes"),t0=u(),ms=o("ul"),ct=o("li"),qR=a("DeepSpeed works with the PyTorch "),Dc=o("a"),ER=a("Trainer"),$R=a(" but not TF "),Mv=o("code"),kR=a("TFTrainer"),PR=a("."),zR=u(),Xr=o("li"),DR=a("While DeepSpeed has a pip installable PyPI package, it is highly recommended that it gets installed from "),Qr=o("a"),OR=a("source"),AR=a(` to best match your hardware and also if you need to enable certain features, like 1-bit Adam, which aren\u2019t available in the pypi distribution.`),TR=u(),ht=o("li"),SR=a("You don\u2019t have to use the "),Oc=o("a"),CR=a("Trainer"),xR=a(` to use DeepSpeed with \u{1F917} Transformers - you can use any model with your own trainer, and you will have to adapt the latter according to `),ep=o("a"),RR=a("the DeepSpeed integration instructions"),IR=a("."),a0=u(),ft=o("h2"),En=o("a"),Lv=o("span"),f(sp.$$.fragment),UR=u(),Zv=o("span"),GR=a("Non-Trainer Deepspeed Integration"),n0=u(),fe=o("p"),MR=a("The "),Ac=o("a"),LR=a("HfDeepSpeedConfig"),ZR=a(` is used to integrate Deepspeed into the \u{1F917} Transformers core functionality, when `),Tc=o("a"),NR=a("Trainer"),HR=a(" is not used. The only thing that it does is handling Deepspeed ZeRO-3 param gathering and automatically splitting the model onto multiple gpus during "),Nv=o("code"),BR=a("from_pretrained"),WR=a(" call. Everything else you have to do by yourself."),o0=u(),$n=o("p"),FR=a("When using "),Sc=o("a"),VR=a("Trainer"),YR=a(" everything is automatically taken care of."),l0=u(),_s=o("p"),KR=a("When not using "),Cc=o("a"),JR=a("Trainer"),XR=a(`, to efficiently deploy DeepSpeed ZeRO-3, you must instantiate the `),xc=o("a"),QR=a("HfDeepSpeedConfig"),eI=a(" object before instantiating the model and keep that object alive."),r0=u(),kn=o("p"),sI=a("If you\u2019re using Deepspeed ZeRO-1 or ZeRO-2 you don\u2019t need to use "),Hv=o("code"),tI=a("HfDeepSpeedConfig"),aI=a(" at all."),p0=u(),Rc=o("p"),nI=a("For example for a pretrained model:"),i0=u(),f(tp.$$.fragment),u0=u(),Ic=o("p"),oI=a("or for non-pretrained model:"),c0=u(),f(ap.$$.fragment),h0=u(),de=o("p"),lI=a("Please note that if you\u2019re not using the "),Uc=o("a"),rI=a("Trainer"),pI=a(" integration, you\u2019re completely on your own. Basically follow the documentation on the "),np=o("a"),iI=a("Deepspeed"),uI=a(" website. Also you have to configure explicitly the config file - you can\u2019t use "),Bv=o("code"),cI=a('"auto"'),hI=a(" values and you will have to put real values instead."),f0=u(),dt=o("h2"),Pn=o("a"),Wv=o("span"),f(op.$$.fragment),fI=u(),Fv=o("span"),dI=a("HfDeepSpeedConfig"),d0=u(),ee=o("div"),f(lp.$$.fragment),mI=u(),Vv=o("p"),_I=a("This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage."),vI=u(),Ae=o("p"),jI=a("A "),Yv=o("code"),wI=a("weakref"),yI=a(` of this object is stored in the module\u2019s globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `),Kv=o("code"),gI=a("from_pretrained"),bI=a(" and "),Jv=o("code"),qI=a("_get_resized_embeddings"),EI=a(`). Therefore it\u2019s important that this object remains alive while the program is still running.`),$I=u(),me=o("p"),Gc=o("a"),kI=a("Trainer"),PI=a(" uses the "),Xv=o("code"),zI=a("HfTrainerDeepSpeedConfig"),DI=a(` subclass instead. 
That subclass has logic to sync the configuration with values of `),Mc=o("a"),OI=a("TrainingArguments"),AI=a(" by replacing special placeholder values: "),Qv=o("code"),TI=a('"auto"'),SI=a(`. Without this special logic the DeepSpeed configuration is not modified in any way.`),m0=u(),mt=o("h3"),zn=o("a"),ej=o("span"),f(rp.$$.fragment),CI=u(),sj=o("span"),xI=a("Custom DeepSpeed ZeRO Inference"),_0=u(),Dn=o("p"),RI=a("Here is an example of how one could do DeepSpeed ZeRO Inference without using "),Lc=o("a"),II=a("Trainer"),UI=a(" when one can\u2019t fit a model onto a single GPU. The solution includes using additional GPUs or/and offloading GPU memory to CPU memory."),v0=u(),Zc=o("p"),GI=a("The important nuance to understand here is that the way ZeRO is designed you can process different inputs on different GPUs in parallel."),j0=u(),Nc=o("p"),MI=a("The example has copious notes and is self-documenting."),w0=u(),Hc=o("p"),LI=a("Make sure to:"),y0=u(),On=o("ol"),tj=o("li"),ZI=a("disable CPU offload if you have enough GPU memory (since it slows things down)"),NI=u(),aj=o("li"),HI=a("enable bf16 if you own an Ampere or a newer GPU to make things faster. If you don\u2019t have that hardware you may enable fp16 as long as you don\u2019t use any model that was pre-trained in bf16 mixed precision (such as most t5 models). These usually overflow in fp16 and you will see garbage as output."),g0=u(),f(pp.$$.fragment),b0=u(),An=o("p"),BI=a("Let\u2019s save it as "),nj=o("code"),WI=a("t0.py"),FI=a(" and run it:"),q0=u(),f(ip.$$.fragment),E0=u(),Bc=o("p"),VI=a("This was a very basic example and you will want to adapt it to your needs."),$0=u(),_t=o("h2"),Tn=o("a"),oj=o("span"),f(up.$$.fragment),YI=u(),lj=o("span"),KI=a("Main DeepSpeed Resources"),k0=u(),_e=o("ul"),rj=o("li"),cp=o("a"),JI=a("Project\u2019s github"),XI=u(),pj=o("li"),hp=o("a"),QI=a("Usage docs"),eU=u(),ij=o("li"),fp=o("a"),sU=a("API docs"),tU=u(),uj=o("li"),dp=o("a"),aU=a("Blog posts"),P0=u(),Wc=o("p"),nU=a("Papers:"),z0=u(),vs=o("ul"),cj=o("li"),mp=o("a"),oU=a("ZeRO: Memory Optimizations Toward Training Trillion Parameter Models"),lU=u(),hj=o("li"),_p=o("a"),rU=a("ZeRO-Offload: Democratizing Billion-Scale Model Training"),pU=u(),fj=o("li"),vp=o("a"),iU=a("ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning"),D0=u(),js=o("p"),uU=a("Finally, please, remember that, HuggingFace "),Fc=o("a"),cU=a("Trainer"),hU=a(` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with `),jp=o("a"),fU=a("DeepSpeed GitHub"),dU=a("."),this.h()},l(e){const p=DY('[data-svelte="svelte-1phssyn"]',document.head);g=l(p,"META",{name:!0,content:!0}),p.forEach(t),S=c(e),b=l(e,"H1",{class:!0});var wp=r(b);k=l(wp,"A",{id:!0,class:!0,href:!0});var dj=r(k);X=l(dj,"SPAN",{});var mj=r(X);d(z.$$.fragment,mj),mj.forEach(t),dj.forEach(t),C=c(wp),Q=l(wp,"SPAN",{});var gU=r(Q);x=n(gU,"DeepSpeed Integration"),gU.forEach(t),wp.forEach(t),te=c(e),T=l(e,"P",{});var _j=r(T);q=l(_j,"A",{href:!0,rel:!0});var bU=r(q);E=n(bU,"DeepSpeed"),bU.forEach(t),gs=n(_j," implements everything described in the "),W=l(_j,"A",{href:!0,rel:!0});var qU=r(W);bs=n(qU,"ZeRO paper"),qU.forEach(t),i4=n(_j,". 
Currently it provides full support for:"),_j.forEach(t),wj=c(e),R=l(e,"OL",{});var ve=r(R);Gh=l(ve,"LI",{});var EU=r(Gh);u4=n(EU,"Optimizer state partitioning (ZeRO stage 1)"),EU.forEach(t),c4=c(ve),Mh=l(ve,"LI",{});var $U=r(Mh);h4=n($U,"Gradient partitioning (ZeRO stage 2)"),$U.forEach(t),f4=c(ve),Lh=l(ve,"LI",{});var kU=r(Lh);d4=n(kU,"Parameter partitioning (ZeRO stage 3)"),kU.forEach(t),m4=c(ve),Zh=l(ve,"LI",{});var PU=r(Zh);_4=n(PU,"Custom mixed precision training handling"),PU.forEach(t),v4=c(ve),Nh=l(ve,"LI",{});var zU=r(Nh);j4=n(zU,"A range of fast CUDA-extension-based optimizers"),zU.forEach(t),w4=c(ve),Hh=l(ve,"LI",{});var DU=r(Hh);y4=n(DU,"ZeRO-Offload to CPU and NVMe"),DU.forEach(t),ve.forEach(t),yj=c(e),Te=l(e,"P",{});var Vc=r(Te);g4=n(Vc,"ZeRO-Offload has its own dedicated paper: "),Kn=l(Vc,"A",{href:!0,rel:!0});var OU=r(Kn);b4=n(OU,"ZeRO-Offload: Democratizing Billion-Scale Model Training"),OU.forEach(t),q4=n(Vc,". And NVMe-support is described in the paper "),Jn=l(Vc,"A",{href:!0,rel:!0});var AU=r(Jn);E4=n(AU,`ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning`),AU.forEach(t),$4=n(Vc,"."),Vc.forEach(t),gj=c(e),kp=l(e,"P",{});var TU=r(kp);k4=n(TU,"DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference."),TU.forEach(t),bj=c(e),Pp=l(e,"P",{});var SU=r(Pp);P4=n(SU,`DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which won\u2019t be possible on a single GPU.`),SU.forEach(t),qj=c(e),wt=l(e,"P",{});var A0=r(wt);z4=n(A0,"\u{1F917} Transformers integrates "),Xn=l(A0,"A",{href:!0,rel:!0});var CU=r(Xn);D4=n(CU,"DeepSpeed"),CU.forEach(t),O4=n(A0," via 2 options:"),A0.forEach(t),Ej=c(e),yt=l(e,"OL",{});var T0=r(yt);Qn=l(T0,"LI",{});var S0=r(Qn);A4=n(S0,"Integration of the core DeepSpeed features via "),zp=l(S0,"A",{href:!0});var xU=r(zp);T4=n(xU,"Trainer"),xU.forEach(t),S4=n(S0,`. This is an everything-done-for-you type of integration - just supply your custom config file or use our template and you have nothing else to do. Most of this document is focused on this feature.`),S0.forEach(t),C4=c(T0),F=l(T0,"LI",{});var je=r(F);x4=n(je,"If you don\u2019t use "),Dp=l(je,"A",{href:!0});var RU=r(Dp);R4=n(RU,"Trainer"),RU.forEach(t),I4=n(je,` and want to use your own Trainer where you integrated DeepSpeed yourself, core functionality functions like `),Bh=l(je,"CODE",{});var IU=r(Bh);U4=n(IU,"from_pretrained"),IU.forEach(t),G4=n(je," and "),Wh=l(je,"CODE",{});var UU=r(Wh);M4=n(UU,"from_config"),UU.forEach(t),L4=n(je,` include integration of essential parts of DeepSpeed like `),Fh=l(je,"CODE",{});var GU=r(Fh);Z4=n(GU,"zero.Init"),GU.forEach(t),N4=n(je,` for ZeRO stage 3 and higher. To tap into this feature read the docs on `),Op=l(je,"A",{href:!0});var MU=r(Op);H4=n(MU,"non-Trainer DeepSpeed Integration"),MU.forEach(t),B4=n(je,"."),je.forEach(t),T0.forEach(t),$j=c(e),Ap=l(e,"P",{});var LU=r(Ap);W4=n(LU,"What is integrated:"),LU.forEach(t),kj=c(e),Tp=l(e,"P",{});var ZU=r(Tp);F4=n(ZU,"Training:"),ZU.forEach(t),Pj=c(e),Sp=l(e,"OL",{});var NU=r(Sp);Vh=l(NU,"LI",{});var HU=r(Vh);V4=n(HU,"DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload)."),HU.forEach(t),NU.forEach(t),zj=c(e),Cp=l(e,"P",{});var BU=r(Cp);Y4=n(BU,"Inference:"),BU.forEach(t),Dj=c(e),xp=l(e,"OL",{});var WU=r(xp);eo=l(WU,"LI",{});var C0=r(eo);K4=n(C0,`DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. 
It uses the same ZeRO protocol as training, but it doesn\u2019t use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see: `),Rp=l(C0,"A",{href:!0});var FU=r(Rp);J4=n(FU,"zero-inference"),FU.forEach(t),X4=n(C0,"."),C0.forEach(t),WU.forEach(t),Oj=c(e),Ip=l(e,"P",{});var VU=r(Ip);Q4=n(VU,`There is also DeepSpeed Inference - this is a totally different technology which uses Tensor Parallelism instead of ZeRO (coming soon).`),VU.forEach(t),Aj=c(e),Up=l(e,"A",{id:!0}),r(Up).forEach(t),Tj=c(e),qs=l(e,"H2",{class:!0});var x0=r(qs);gt=l(x0,"A",{id:!0,class:!0,href:!0});var YU=r(gt);Yh=l(YU,"SPAN",{});var KU=r(Yh);d(so.$$.fragment,KU),KU.forEach(t),YU.forEach(t),e6=c(x0),Kh=l(x0,"SPAN",{});var JU=r(Kh);s6=n(JU,"Trainer Deepspeed Integration"),JU.forEach(t),x0.forEach(t),Sj=c(e),Gp=l(e,"A",{id:!0}),r(Gp).forEach(t),Cj=c(e),Es=l(e,"H3",{class:!0});var R0=r(Es);bt=l(R0,"A",{id:!0,class:!0,href:!0});var XU=r(bt);Jh=l(XU,"SPAN",{});var QU=r(Jh);d(to.$$.fragment,QU),QU.forEach(t),XU.forEach(t),t6=c(R0),Xh=l(R0,"SPAN",{});var eG=r(Xh);a6=n(eG,"Installation"),eG.forEach(t),R0.forEach(t),xj=c(e),Mp=l(e,"P",{});var sG=r(Mp);n6=n(sG,"Install the library via pypi:"),sG.forEach(t),Rj=c(e),d(ao.$$.fragment,e),Ij=c(e),Se=l(e,"P",{});var Yc=r(Se);o6=n(Yc,"or via "),Qh=l(Yc,"CODE",{});var tG=r(Qh);l6=n(tG,"transformers"),tG.forEach(t),r6=n(Yc,"\u2019 "),ef=l(Yc,"CODE",{});var aG=r(ef);p6=n(aG,"extras"),aG.forEach(t),i6=n(Yc,":"),Yc.forEach(t),Uj=c(e),d(no.$$.fragment,e),Gj=c(e),Ce=l(e,"P",{});var Kc=r(Ce);u6=n(Kc,"or find more details on "),oo=l(Kc,"A",{href:!0,rel:!0});var nG=r(oo);c6=n(nG,"the DeepSpeed\u2019s GitHub page"),nG.forEach(t),h6=n(Kc,` and `),lo=l(Kc,"A",{href:!0,rel:!0});var oG=r(lo);f6=n(oG,"advanced install"),oG.forEach(t),d6=n(Kc,"."),Kc.forEach(t),Mj=c(e),qt=l(e,"P",{});var I0=r(qt);m6=n(I0,"If you\u2019re still struggling with the build, first make sure to read "),Lp=l(I0,"A",{href:!0});var lG=r(Lp);_6=n(lG,"CUDA Extension Installation Notes"),lG.forEach(t),v6=n(I0,"."),I0.forEach(t),Lj=c(e),Zp=l(e,"P",{});var rG=r(Zp);j6=n(rG,`If you don\u2019t prebuild the extensions and rely on them to be built at run time and you tried all of the above solutions to no avail, the next thing to try is to pre-build the modules before installing them.`),rG.forEach(t),Zj=c(e),Np=l(e,"P",{});var pG=r(Np);w6=n(pG,"To make a local build for DeepSpeed:"),pG.forEach(t),Nj=c(e),d(ro.$$.fragment,e),Hj=c(e),xe=l(e,"P",{});var Jc=r(xe);y6=n(Jc,"If you intend to use NVMe offload you will also need to include "),sf=l(Jc,"CODE",{});var iG=r(sf);g6=n(iG,"DS_BUILD_AIO=1"),iG.forEach(t),b6=n(Jc,` in the instructions above (and also install `),tf=l(Jc,"EM",{});var uG=r(tf);q6=n(uG,"libaio-dev"),uG.forEach(t),E6=n(Jc," system-wide)."),Jc.forEach(t),Bj=c(e),Et=l(e,"P",{});var U0=r(Et);$6=n(U0,"Edit "),af=l(U0,"CODE",{});var cG=r(af);k6=n(cG,"TORCH_CUDA_ARCH_LIST"),cG.forEach(t),P6=n(U0,` to insert the code for the architectures of the GPU cards you intend to use. Assuming all your cards are the same you can get the arch via:`),U0.forEach(t),Wj=c(e),d(po.$$.fragment,e),Fj=c(e),ke=l(e,"P",{});var yp=r(ke);z6=n(yp,"So if you get "),nf=l(yp,"CODE",{});var hG=r(nf);D6=n(hG,"8, 6"),hG.forEach(t),O6=n(yp,", then use "),of=l(yp,"CODE",{});var fG=r(of);A6=n(fG,'TORCH_CUDA_ARCH_LIST="8.6"'),fG.forEach(t),T6=n(yp,`. 
If you have multiple different cards, you can list all of them like so `),lf=l(yp,"CODE",{});var dG=r(lf);S6=n(dG,'TORCH_CUDA_ARCH_LIST="6.1;8.6"'),dG.forEach(t),yp.forEach(t),Vj=c(e),Hp=l(e,"P",{});var mG=r(Hp);C6=n(mG,"If you need to use the same setup on multiple machines, make a binary wheel:"),mG.forEach(t),Yj=c(e),d(io.$$.fragment,e),Kj=c(e),Re=l(e,"P",{});var Xc=r(Re);x6=n(Xc,"it will generate something like "),rf=l(Xc,"CODE",{});var _G=r(rf);R6=n(_G,"dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl"),_G.forEach(t),I6=n(Xc,` which now you can install as `),pf=l(Xc,"CODE",{});var vG=r(pf);U6=n(vG,"pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl"),vG.forEach(t),G6=n(Xc," locally or on any other machine."),Xc.forEach(t),Jj=c(e),$t=l(e,"P",{});var G0=r($t);M6=n(G0,"Again, remember to ensure to adjust "),uf=l(G0,"CODE",{});var jG=r(uf);L6=n(jG,"TORCH_CUDA_ARCH_LIST"),jG.forEach(t),Z6=n(G0," to the target architectures."),G0.forEach(t),Xj=c(e),Ie=l(e,"P",{});var Qc=r(Ie);N6=n(Qc,"You can find the complete list of NVIDIA GPUs and their corresponding "),cf=l(Qc,"STRONG",{});var wG=r(cf);H6=n(wG,"Compute Capabilities"),wG.forEach(t),B6=n(Qc,` (same as arch in this context) `),uo=l(Qc,"A",{href:!0,rel:!0});var yG=r(uo);W6=n(yG,"here"),yG.forEach(t),F6=n(Qc,"."),Qc.forEach(t),Qj=c(e),Bp=l(e,"P",{});var gG=r(Bp);V6=n(gG,"You can check the archs pytorch was built with using:"),gG.forEach(t),ew=c(e),d(co.$$.fragment,e),sw=c(e),Wp=l(e,"P",{});var bG=r(Wp);Y6=n(bG,"Here is how to find out the arch for one of the installed GPUs. For example, for GPU 0:"),bG.forEach(t),tw=c(e),d(ho.$$.fragment,e),aw=c(e),Fp=l(e,"P",{});var qG=r(Fp);K6=n(qG,"If the output is:"),qG.forEach(t),nw=c(e),d(fo.$$.fragment,e),ow=c(e),kt=l(e,"P",{});var M0=r(kt);J6=n(M0,"then you know that this card\u2019s arch is "),hf=l(M0,"CODE",{});var EG=r(hf);X6=n(EG,"8.6"),EG.forEach(t),Q6=n(M0,"."),M0.forEach(t),lw=c(e),Pt=l(e,"P",{});var L0=r(Pt);e$=n(L0,"You can also leave "),ff=l(L0,"CODE",{});var $G=r(ff);s$=n($G,"TORCH_CUDA_ARCH_LIST"),$G.forEach(t),t$=n(L0,` out completely and then the build program will automatically query the architecture of the GPUs the build is made on. 
This may or may not match the GPUs on the target machines, that\u2019s why it\u2019s best to specify the desired archs explicitly.`),L0.forEach(t),rw=c(e),zt=l(e,"P",{});var Z0=r(zt);a$=n(Z0,`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),mo=l(Z0,"A",{href:!0,rel:!0});var kG=r(mo);n$=n(kG,"Deepspeed"),kG.forEach(t),o$=n(Z0,","),Z0.forEach(t),pw=c(e),Vp=l(e,"A",{id:!0}),r(Vp).forEach(t),iw=c(e),$s=l(e,"H3",{class:!0});var N0=r($s);Dt=l(N0,"A",{id:!0,class:!0,href:!0});var PG=r(Dt);df=l(PG,"SPAN",{});var zG=r(df);d(_o.$$.fragment,zG),zG.forEach(t),PG.forEach(t),l$=c(N0),mf=l(N0,"SPAN",{});var DG=r(mf);r$=n(DG,"Deployment with multiple GPUs"),DG.forEach(t),N0.forEach(t),uw=c(e),Ot=l(e,"P",{});var H0=r(Ot);p$=n(H0,"To deploy this feature with multiple GPUs adjust the "),Yp=l(H0,"A",{href:!0});var OG=r(Yp);i$=n(OG,"Trainer"),OG.forEach(t),u$=n(H0,` command line arguments as following:`),H0.forEach(t),cw=c(e),At=l(e,"OL",{});var B0=r(At);ks=l(B0,"LI",{});var eh=r(ks);c$=n(eh,"replace "),_f=l(eh,"CODE",{});var AG=r(_f);h$=n(AG,"python -m torch.distributed.launch"),AG.forEach(t),f$=n(eh," with "),vf=l(eh,"CODE",{});var TG=r(vf);d$=n(TG,"deepspeed"),TG.forEach(t),m$=n(eh,"."),eh.forEach(t),_$=c(B0),Pe=l(B0,"LI",{});var Sn=r(Pe);v$=n(Sn,"add a new argument "),jf=l(Sn,"CODE",{});var SG=r(jf);j$=n(SG,"--deepspeed ds_config.json"),SG.forEach(t),w$=n(Sn,", where "),wf=l(Sn,"CODE",{});var CG=r(wf);y$=n(CG,"ds_config.json"),CG.forEach(t),g$=n(Sn,` is the DeepSpeed configuration file as documented `),vo=l(Sn,"A",{href:!0,rel:!0});var xG=r(vo);b$=n(xG,"here"),xG.forEach(t),q$=n(Sn,". The file naming is up to you."),Sn.forEach(t),B0.forEach(t),hw=c(e),Kp=l(e,"P",{});var RG=r(Kp);E$=n(RG,"Therefore, if your original command line looked as follows:"),RG.forEach(t),fw=c(e),d(jo.$$.fragment,e),dw=c(e),Jp=l(e,"P",{});var IG=r(Jp);$$=n(IG,"Now it should be:"),IG.forEach(t),mw=c(e),d(wo.$$.fragment,e),_w=c(e),I=l(e,"P",{});var we=r(I);k$=n(we,"Unlike, "),yf=l(we,"CODE",{});var UG=r(yf);P$=n(UG,"torch.distributed.launch"),UG.forEach(t),z$=n(we," where you have to specify how many GPUs to use with "),gf=l(we,"CODE",{});var GG=r(gf);D$=n(GG,"--nproc_per_node"),GG.forEach(t),O$=n(we,`, with the `),bf=l(we,"CODE",{});var MG=r(bf);A$=n(MG,"deepspeed"),MG.forEach(t),T$=n(we," launcher you don\u2019t have to use the corresponding "),qf=l(we,"CODE",{});var LG=r(qf);S$=n(LG,"--num_gpus"),LG.forEach(t),C$=n(we,` if you want all of your GPUs used. The full details on how to configure various nodes and GPUs can be found `),yo=l(we,"A",{href:!0,rel:!0});var ZG=r(yo);x$=n(ZG,"here"),ZG.forEach(t),R$=n(we,"."),we.forEach(t),vw=c(e),ae=l(e,"P",{});var Cn=r(ae);I$=n(Cn,"In fact, you can continue using "),Ef=l(Cn,"CODE",{});var NG=r(Ef);U$=n(NG,"-m torch.distributed.launch"),NG.forEach(t),G$=n(Cn,` with DeepSpeed as long as you don\u2019t need to use `),$f=l(Cn,"CODE",{});var HG=r($f);M$=n(HG,"deepspeed"),HG.forEach(t),L$=n(Cn,` launcher-specific arguments. Typically if you don\u2019t need a multi-node setup you\u2019re not required to use the `),kf=l(Cn,"CODE",{});var BG=r(kf);Z$=n(BG,"deepspeed"),BG.forEach(t),N$=n(Cn,` launcher. 
But since in the DeepSpeed documentation it\u2019ll be used everywhere, for consistency we will use it here as well.`),Cn.forEach(t),jw=c(e),Tt=l(e,"P",{});var W0=r(Tt);H$=n(W0,"Here is an example of running "),Pf=l(W0,"CODE",{});var WG=r(Pf);B$=n(WG,"run_translation.py"),WG.forEach(t),W$=n(W0," under DeepSpeed deploying all available GPUs:"),W0.forEach(t),ww=c(e),d(go.$$.fragment,e),yw=c(e),St=l(e,"P",{});var F0=r(St);F$=n(F0,"Note that in the DeepSpeed documentation you are likely to see "),zf=l(F0,"CODE",{});var FG=r(zf);V$=n(FG,"--deepspeed --deepspeed_config ds_config.json"),FG.forEach(t),Y$=n(F0,` - i.e. two DeepSpeed-related arguments, but for the sake of simplicity, and since there are already so many arguments to deal with, we combined the two into a single argument.`),F0.forEach(t),gw=c(e),Ct=l(e,"P",{});var V0=r(Ct);K$=n(V0,"For some practical usage examples, please, see this "),bo=l(V0,"A",{href:!0,rel:!0});var VG=r(bo);J$=n(VG,"post"),VG.forEach(t),X$=n(V0,"."),V0.forEach(t),bw=c(e),Xp=l(e,"A",{id:!0}),r(Xp).forEach(t),qw=c(e),Ps=l(e,"H3",{class:!0});var Y0=r(Ps);xt=l(Y0,"A",{id:!0,class:!0,href:!0});var YG=r(xt);Df=l(YG,"SPAN",{});var KG=r(Df);d(qo.$$.fragment,KG),KG.forEach(t),YG.forEach(t),Q$=c(Y0),Of=l(Y0,"SPAN",{});var JG=r(Of);e5=n(JG,"Deployment with one GPU"),JG.forEach(t),Y0.forEach(t),Ew=c(e),Rt=l(e,"P",{});var K0=r(Rt);s5=n(K0,"To deploy DeepSpeed with one GPU adjust the "),Qp=l(K0,"A",{href:!0});var XG=r(Qp);t5=n(XG,"Trainer"),XG.forEach(t),a5=n(K0," command line arguments as follows:"),K0.forEach(t),$w=c(e),d(Eo.$$.fragment,e),kw=c(e),Ue=l(e,"P",{});var sh=r(Ue);n5=n(sh,`This is almost the same as with multiple-GPUs, but here we tell DeepSpeed explicitly to use just one GPU via `),Af=l(sh,"CODE",{});var QG=r(Af);o5=n(QG,"--num_gpus=1"),QG.forEach(t),l5=n(sh,`. By default, DeepSpeed deploys all GPUs it can see on the given node. If you have only 1 GPU to start with, then you don\u2019t need this argument. The following `),$o=l(sh,"A",{href:!0,rel:!0});var eM=r($o);r5=n(eM,"documentation"),eM.forEach(t),p5=n(sh," discusses the launcher options."),sh.forEach(t),Pw=c(e),ei=l(e,"P",{});var sM=r(ei);i5=n(sM,"Why would you want to use DeepSpeed with just one GPU?"),sM.forEach(t),zw=c(e),It=l(e,"OL",{});var J0=r(It);Tf=l(J0,"LI",{});var tM=r(Tf);u5=n(tM,`It has a ZeRO-offload feature which can delegate some computations and memory to the host\u2019s CPU and RAM, and thus leave more GPU resources for model\u2019s needs - e.g. larger batch size, or enabling a fitting of a very big model which normally won\u2019t fit.`),tM.forEach(t),c5=c(J0),Sf=l(J0,"LI",{});var aM=r(Sf);h5=n(aM,`It provides a smart GPU memory management system, that minimizes memory fragmentation, which again allows you to fit bigger models and data batches.`),aM.forEach(t),J0.forEach(t),Dw=c(e),si=l(e,"P",{});var nM=r(si);f5=n(nM,`While we are going to discuss the configuration in details next, the key to getting a huge improvement on a single GPU with DeepSpeed is to have at least the following configuration in the configuration file:`),nM.forEach(t),Ow=c(e),d(ko.$$.fragment,e),Aw=c(e),ti=l(e,"P",{});var oM=r(ti);d5=n(oM,`which enables optimizer offload and some other important features. 
You may experiment with the buffer sizes, you will find more details in the discussion below.`),oM.forEach(t),Tw=c(e),Ut=l(e,"P",{});var X0=r(Ut);m5=n(X0,"For a practical usage example of this type of deployment, please, see this "),Po=l(X0,"A",{href:!0,rel:!0});var lM=r(Po);_5=n(lM,"post"),lM.forEach(t),v5=n(X0,"."),X0.forEach(t),Sw=c(e),ai=l(e,"P",{});var rM=r(ai);j5=n(rM,"You may also try the ZeRO-3 with CPU and NVMe offload as explained further in this document."),rM.forEach(t),Cw=c(e),ni=l(e,"P",{});var pM=r(ni);w5=n(pM,"Notes:"),pM.forEach(t),xw=c(e),oi=l(e,"UL",{});var iM=r(oi);zs=l(iM,"LI",{});var th=r(zs);zo=l(th,"P",{});var Q0=r(zo);y5=n(Q0,"if you need to run on a specific GPU, which is different from GPU 0, you can\u2019t use "),Cf=l(Q0,"CODE",{});var uM=r(Cf);g5=n(uM,"CUDA_VISIBLE_DEVICES"),uM.forEach(t),b5=n(Q0,` to limit the visible scope of available GPUs. Instead, you have to use the following syntax:`),Q0.forEach(t),q5=c(th),d(Do.$$.fragment,th),E5=c(th),xf=l(th,"P",{});var cM=r(xf);$5=n(cM,"In this example, we tell DeepSpeed to use GPU 1 (second gpu)."),cM.forEach(t),th.forEach(t),iM.forEach(t),Rw=c(e),li=l(e,"A",{id:!0}),r(li).forEach(t),Iw=c(e),Ds=l(e,"H3",{class:!0});var eq=r(Ds);Gt=l(eq,"A",{id:!0,class:!0,href:!0});var hM=r(Gt);Rf=l(hM,"SPAN",{});var fM=r(Rf);d(Oo.$$.fragment,fM),fM.forEach(t),hM.forEach(t),k5=c(eq),If=l(eq,"SPAN",{});var dM=r(If);P5=n(dM,"Deployment in Notebooks"),dM.forEach(t),eq.forEach(t),Uw=c(e),Mt=l(e,"P",{});var sq=r(Mt);z5=n(sq,"The problem with running notebook cells as a script is that there is no normal "),Uf=l(sq,"CODE",{});var mM=r(Uf);D5=n(mM,"deepspeed"),mM.forEach(t),O5=n(sq,` launcher to rely on, so under certain setups we have to emulate it.`),sq.forEach(t),Gw=c(e),ri=l(e,"P",{});var _M=r(ri);A5=n(_M,"If you\u2019re using only 1 GPU, here is how you\u2019d have to adjust your training code in the notebook to use DeepSpeed."),_M.forEach(t),Mw=c(e),d(Ao.$$.fragment,e),Lw=c(e),Lt=l(e,"P",{});var tq=r(Lt);T5=n(tq,"Note: "),Gf=l(tq,"CODE",{});var vM=r(Gf);S5=n(vM,"..."),vM.forEach(t),C5=n(tq," stands for the normal arguments that you\u2019d pass to the functions."),tq.forEach(t),Zw=c(e),pi=l(e,"P",{});var jM=r(pi);x5=n(jM,`If you want to use more than 1 GPU, you must use a multi-process environment for DeepSpeed to work. That is, you have to use the launcher for that purpose and this cannot be accomplished by emulating the distributed environment presented at the beginning of this section.`),jM.forEach(t),Nw=c(e),ii=l(e,"P",{});var wM=r(ii);R5=n(wM,`If you want to create the config file on the fly in the notebook in the current directory, you could have a dedicated cell with:`),wM.forEach(t),Hw=c(e),d(To.$$.fragment,e),Bw=c(e),Ge=l(e,"P",{});var ah=r(Ge);I5=n(ah,"If the training script is in a normal file and not in the notebook cells, you can launch "),Mf=l(ah,"CODE",{});var yM=r(Mf);U5=n(yM,"deepspeed"),yM.forEach(t),G5=n(ah,` normally via shell from a cell. 
For example, to use `),Lf=l(ah,"CODE",{});var gM=r(Lf);M5=n(gM,"run_translation.py"),gM.forEach(t),L5=n(ah," you would launch it with:"),ah.forEach(t),Ww=c(e),d(So.$$.fragment,e),Fw=c(e),Zt=l(e,"P",{});var aq=r(Zt);Z5=n(aq,"or with "),Zf=l(aq,"CODE",{});var bM=r(Zf);N5=n(bM,"%%bash"),bM.forEach(t),H5=n(aq," magic, where you can write a multi-line code for the shell program to run:"),aq.forEach(t),Vw=c(e),d(Co.$$.fragment,e),Yw=c(e),ui=l(e,"P",{});var qM=r(ui);B5=n(qM,"In such case you don\u2019t need any of the code presented at the beginning of this section."),qM.forEach(t),Kw=c(e),Nt=l(e,"P",{});var nq=r(Nt);W5=n(nq,"Note: While "),Nf=l(nq,"CODE",{});var EM=r(Nf);F5=n(EM,"%%bash"),EM.forEach(t),V5=n(nq,` magic is neat, but currently it buffers the output so you won\u2019t see the logs until the process completes.`),nq.forEach(t),Jw=c(e),ci=l(e,"A",{id:!0}),r(ci).forEach(t),Xw=c(e),Os=l(e,"H3",{class:!0});var oq=r(Os);Ht=l(oq,"A",{id:!0,class:!0,href:!0});var $M=r(Ht);Hf=l($M,"SPAN",{});var kM=r(Hf);d(xo.$$.fragment,kM),kM.forEach(t),$M.forEach(t),Y5=c(oq),Bf=l(oq,"SPAN",{});var PM=r(Bf);K5=n(PM,"Configuration"),PM.forEach(t),oq.forEach(t),Qw=c(e),Bt=l(e,"P",{});var lq=r(Bt);J5=n(lq,`For the complete guide to the DeepSpeed configuration options that can be used in its configuration file please refer to the `),Ro=l(lq,"A",{href:!0,rel:!0});var zM=r(Ro);X5=n(zM,"following documentation"),zM.forEach(t),Q5=n(lq,"."),lq.forEach(t),ey=c(e),Wt=l(e,"P",{});var rq=r(Wt);e9=n(rq,"You can find dozens of DeepSpeed configuration examples that address various practical needs in "),Io=l(rq,"A",{href:!0,rel:!0});var DM=r(Io);s9=n(DM,`the DeepSpeedExamples repo`),DM.forEach(t),t9=n(rq,":"),rq.forEach(t),sy=c(e),d(Uo.$$.fragment,e),ty=c(e),Ft=l(e,"P",{});var pq=r(Ft);a9=n(pq,`Continuing the code from above, let\u2019s say you\u2019re looking to configure the Lamb optimizer. So you can search through the example `),Wf=l(pq,"CODE",{});var OM=r(Wf);n9=n(OM,".json"),OM.forEach(t),o9=n(pq," files with:"),pq.forEach(t),ay=c(e),d(Go.$$.fragment,e),ny=c(e),Vt=l(e,"P",{});var iq=r(Vt);l9=n(iq,"Some more examples are to be found in the "),Mo=l(iq,"A",{href:!0,rel:!0});var AM=r(Mo);r9=n(AM,"main repo"),AM.forEach(t),p9=n(iq," as well."),iq.forEach(t),oy=c(e),hi=l(e,"P",{});var TM=r(hi);i9=n(TM,`When using DeepSpeed you always need to supply a DeepSpeed configuration file, yet some configuration parameters have to be configured via the command line. 
You will find the nuances in the rest of this guide.`),TM.forEach(t),ly=c(e),ne=l(e,"P",{});var xn=r(ne);u9=n(xn,`To get an idea of what DeepSpeed configuration file looks like, here is one that activates ZeRO stage 2 features, including optimizer states cpu offload, uses `),Ff=l(xn,"CODE",{});var SM=r(Ff);c9=n(SM,"AdamW"),SM.forEach(t),h9=n(xn," optimizer and "),Vf=l(xn,"CODE",{});var CM=r(Vf);f9=n(CM,"WarmupLR"),CM.forEach(t),d9=n(xn,` scheduler and will enable mixed precision training if `),Yf=l(xn,"CODE",{});var xM=r(Yf);m9=n(xM,"--fp16"),xM.forEach(t),_9=n(xn," is passed:"),xn.forEach(t),ry=c(e),d(Lo.$$.fragment,e),py=c(e),Yt=l(e,"P",{});var uq=r(Yt);v9=n(uq,"When you execute the program, DeepSpeed will log the configuration it received from the "),fi=l(uq,"A",{href:!0});var RM=r(fi);j9=n(RM,"Trainer"),RM.forEach(t),w9=n(uq,` to the console, so you can see exactly what was the final configuration passed to it.`),uq.forEach(t),iy=c(e),di=l(e,"A",{id:!0}),r(di).forEach(t),uy=c(e),As=l(e,"H3",{class:!0});var cq=r(As);Kt=l(cq,"A",{id:!0,class:!0,href:!0});var IM=r(Kt);Kf=l(IM,"SPAN",{});var UM=r(Kf);d(Zo.$$.fragment,UM),UM.forEach(t),IM.forEach(t),y9=c(cq),Jf=l(cq,"SPAN",{});var GM=r(Jf);g9=n(GM,"Passing Configuration"),GM.forEach(t),cq.forEach(t),cy=c(e),U=l(e,"P",{});var ye=r(U);b9=n(ye,`As discussed in this document normally the DeepSpeed configuration is passed as a path to a json file, but if you\u2019re not using the command line interface to configure the training, and instead instantiate the `),mi=l(ye,"A",{href:!0});var MM=r(mi);q9=n(MM,"Trainer"),MM.forEach(t),E9=n(ye," via "),_i=l(ye,"A",{href:!0});var LM=r(_i);$9=n(LM,"TrainingArguments"),LM.forEach(t),k9=n(ye," then for the "),Xf=l(ye,"CODE",{});var ZM=r(Xf);P9=n(ZM,"deepspeed"),ZM.forEach(t),z9=n(ye,` argument you can pass a nested `),Qf=l(ye,"CODE",{});var NM=r(Qf);D9=n(NM,"dict"),NM.forEach(t),O9=n(ye,`. 
This allows you to create the configuration on the fly and doesn\u2019t require you to write it to the file system before passing it to `),vi=l(ye,"A",{href:!0});var HM=r(vi);A9=n(HM,"TrainingArguments"),HM.forEach(t),T9=n(ye,"."),ye.forEach(t),hy=c(e),ji=l(e,"P",{});var BM=r(ji);S9=n(BM,"To summarize you can do:"),BM.forEach(t),fy=c(e),d(No.$$.fragment,e),dy=c(e),wi=l(e,"P",{});var WM=r(wi);C9=n(WM,"or:"),WM.forEach(t),my=c(e),d(Ho.$$.fragment,e),_y=c(e),yi=l(e,"A",{id:!0}),r(yi).forEach(t),vy=c(e),Ts=l(e,"H3",{class:!0});var hq=r(Ts);Jt=l(hq,"A",{id:!0,class:!0,href:!0});var FM=r(Jt);ed=l(FM,"SPAN",{});var VM=r(ed);d(Bo.$$.fragment,VM),VM.forEach(t),FM.forEach(t),x9=c(hq),sd=l(hq,"SPAN",{});var YM=r(sd);R9=n(YM,"Shared Configuration"),YM.forEach(t),hq.forEach(t),jy=c(e),d(Xt.$$.fragment,e),wy=c(e),Me=l(e,"P",{});var nh=r(Me);I9=n(nh,"Some configuration values are required by both the "),gi=l(nh,"A",{href:!0});var KM=r(gi);U9=n(KM,"Trainer"),KM.forEach(t),G9=n(nh,` and DeepSpeed to function correctly, therefore, to prevent conflicting definitions, which could lead to hard to detect errors, we chose to configure those via the `),bi=l(nh,"A",{href:!0});var JM=r(bi);M9=n(JM,"Trainer"),JM.forEach(t),L9=n(nh," command line arguments."),nh.forEach(t),yy=c(e),Qt=l(e,"P",{});var fq=r(Qt);Z9=n(fq,`Additionally, some configuration values are derived automatically based on the model\u2019s configuration, so instead of remembering to manually adjust multiple values, it\u2019s the best to let the `),qi=l(fq,"A",{href:!0});var XM=r(qi);N9=n(XM,"Trainer"),XM.forEach(t),H9=n(fq,` do the majority of configuration for you.`),fq.forEach(t),gy=c(e),Le=l(e,"P",{});var oh=r(Le);B9=n(oh,"Therefore, in the rest of this guide you will find a special configuration value: "),td=l(oh,"CODE",{});var QM=r(td);W9=n(QM,"auto"),QM.forEach(t),F9=n(oh,`, which when set will be automatically replaced with the correct or most efficient value. Please feel free to choose to ignore this recommendation and set the values explicitly, in which case be very careful that your the `),Ei=l(oh,"A",{href:!0});var eL=r(Ei);V9=n(eL,"Trainer"),eL.forEach(t),Y9=n(oh,` arguments and DeepSpeed configurations agree. For example, are you using the same learning rate, or batch size, or gradient accumulation settings? if these mismatch the training may fail in very difficult to detect ways. You have been warned.`),oh.forEach(t),by=c(e),$i=l(e,"P",{});var sL=r($i);K9=n(sL,`There are multiple other values that are specific to DeepSpeed-only and those you will have to set manually to suit your needs.`),sL.forEach(t),qy=c(e),ea=l(e,"P",{});var dq=r(ea);J9=n(dq,`In your own programs, you can also use the following approach if you\u2019d like to modify the DeepSpeed config as a master and configure `),ki=l(dq,"A",{href:!0});var tL=r(ki);X9=n(tL,"TrainingArguments"),tL.forEach(t),Q9=n(dq," based on that. 
The steps are:"),dq.forEach(t),Ey=c(e),sa=l(e,"OL",{});var mq=r(sa);ad=l(mq,"LI",{});var aL=r(ad);e8=n(aL,"Create or load the DeepSpeed configuration to be used as a master configuration"),aL.forEach(t),s8=c(mq),Wo=l(mq,"LI",{});var _q=r(Wo);t8=n(_q,"Create the "),Pi=l(_q,"A",{href:!0});var nL=r(Pi);a8=n(nL,"TrainingArguments"),nL.forEach(t),n8=n(_q," object based on these values"),_q.forEach(t),mq.forEach(t),$y=c(e),oe=l(e,"P",{});var Rn=r(oe);o8=n(Rn,"Do note that some values, such as "),nd=l(Rn,"CODE",{});var oL=r(nd);l8=n(oL,"scheduler.params.total_num_steps"),oL.forEach(t),r8=n(Rn,` are calculated by `),zi=l(Rn,"A",{href:!0});var lL=r(zi);p8=n(lL,"Trainer"),lL.forEach(t),i8=n(Rn," during "),od=l(Rn,"CODE",{});var rL=r(od);u8=n(rL,"train"),rL.forEach(t),c8=n(Rn,", but you can of course do the math yourself."),Rn.forEach(t),ky=c(e),Di=l(e,"A",{id:!0}),r(Di).forEach(t),Py=c(e),Ss=l(e,"H3",{class:!0});var vq=r(Ss);ta=l(vq,"A",{id:!0,class:!0,href:!0});var pL=r(ta);ld=l(pL,"SPAN",{});var iL=r(ld);d(Fo.$$.fragment,iL),iL.forEach(t),pL.forEach(t),h8=c(vq),rd=l(vq,"SPAN",{});var uL=r(rd);f8=n(uL,"ZeRO"),uL.forEach(t),vq.forEach(t),zy=c(e),Vo=l(e,"P",{});var mU=r(Vo);Yo=l(mU,"A",{href:!0,rel:!0});var cL=r(Yo);d8=n(cL,"Zero Redundancy Optimizer (ZeRO)"),cL.forEach(t),m8=n(mU,` is the workhorse of DeepSpeed. It supports 3 different levels (stages) of optimization. The first one is not quite interesting for scalability purposes, therefore this document focuses on stages 2 and 3. Stage 3 is further improved by the latest addition of ZeRO-Infinity. You will find more indepth information in the DeepSpeed documentation.`),mU.forEach(t),Dy=c(e),Ze=l(e,"P",{});var lh=r(Ze);_8=n(lh,"The "),pd=l(lh,"CODE",{});var hL=r(pd);v8=n(hL,"zero_optimization"),hL.forEach(t),j8=n(lh," section of the configuration file is the most important part ("),Ko=l(lh,"A",{href:!0,rel:!0});var fL=r(Ko);w8=n(fL,"docs"),fL.forEach(t),y8=n(lh,`), since that is where you define which ZeRO stages you want to enable and how to configure them. You will find the explanation for each parameter in the DeepSpeed docs.`),lh.forEach(t),Oy=c(e),aa=l(e,"P",{});var jq=r(aa);g8=n(jq,"This section has to be configured exclusively via DeepSpeed configuration - the "),Oi=l(jq,"A",{href:!0});var dL=r(Oi);b8=n(dL,"Trainer"),dL.forEach(t),q8=n(jq,` provides no equivalent command line arguments.`),jq.forEach(t),Ay=c(e),Ai=l(e,"P",{});var mL=r(Ai);E8=n(mL,`Note: currently DeepSpeed doesn\u2019t validate parameter names, so if you misspell any, it\u2019ll use the default setting for the parameter that got misspelled. 
You can watch the DeepSpeed engine start up log messages to see what values it is going to use.`),mL.forEach(t),Ty=c(e),Ti=l(e,"A",{id:!0}),r(Ti).forEach(t),Sy=c(e),Cs=l(e,"H4",{class:!0});var wq=r(Cs);na=l(wq,"A",{id:!0,class:!0,href:!0});var _L=r(na);id=l(_L,"SPAN",{});var vL=r(id);d(Jo.$$.fragment,vL),vL.forEach(t),_L.forEach(t),$8=c(wq),ud=l(wq,"SPAN",{});var jL=r(ud);k8=n(jL,"ZeRO-2 Config"),jL.forEach(t),wq.forEach(t),Cy=c(e),Si=l(e,"P",{});var wL=r(Si);P8=n(wL,"The following is an example of configuration for ZeRO stage 2:"),wL.forEach(t),xy=c(e),d(Xo.$$.fragment,e),Ry=c(e),Ci=l(e,"P",{});var yL=r(Ci);cd=l(yL,"STRONG",{});var gL=r(cd);z8=n(gL,"Performance tuning:"),gL.forEach(t),yL.forEach(t),Iy=c(e),Ne=l(e,"UL",{});var rh=r(Ne);xs=l(rh,"LI",{});var ph=r(xs);D8=n(ph,"enabling "),hd=l(ph,"CODE",{});var bL=r(hd);O8=n(bL,"offload_optimizer"),bL.forEach(t),A8=n(ph," should reduce GPU RAM usage (it requires "),fd=l(ph,"CODE",{});var qL=r(fd);T8=n(qL,'"stage": 2'),qL.forEach(t),S8=n(ph,")"),ph.forEach(t),C8=c(rh),G=l(rh,"LI",{});var se=r(G);dd=l(se,"CODE",{});var EL=r(dd);x8=n(EL,'"overlap_comm": true'),EL.forEach(t),R8=n(se," trades off increased GPU RAM usage to lower all-reduce latency. "),md=l(se,"CODE",{});var $L=r(md);I8=n($L,"overlap_comm"),$L.forEach(t),U8=n(se,` uses 4.5x the `),_d=l(se,"CODE",{});var kL=r(_d);G8=n(kL,"allgather_bucket_size"),kL.forEach(t),M8=n(se," and "),vd=l(se,"CODE",{});var PL=r(vd);L8=n(PL,"reduce_bucket_size"),PL.forEach(t),Z8=n(se,` values. So if they are set to 5e8, this requires a 9GB footprint (`),jd=l(se,"CODE",{});var zL=r(jd);N8=n(zL,"5e8 x 2Bytes x 2 x 4.5"),zL.forEach(t),H8=n(se,`). Therefore, if you have a GPU with 8GB or less RAM, to avoid getting OOM-errors you will need to reduce those parameters to about `),wd=l(se,"CODE",{});var DL=r(wd);B8=n(DL,"2e8"),DL.forEach(t),W8=n(se,`, which would require 3.6GB. You will want to do the same on larger capacity GPU as well, if you\u2019re starting to hit OOM.`),se.forEach(t),F8=c(rh),yd=l(rh,"LI",{});var OL=r(yd);V8=n(OL,`when reducing these buffers you\u2019re trading communication speed to avail more GPU RAM. The smaller the buffer size is, the slower the communication gets, and the more GPU RAM will be available to other tasks. So if a bigger batch size is important, getting a slightly slower training time could be a good trade.`),OL.forEach(t),rh.forEach(t),Uy=c(e),He=l(e,"P",{});var ih=r(He);Y8=n(ih,"Additionally, "),gd=l(ih,"CODE",{});var AL=r(gd);K8=n(AL,"deepspeed==0.4.4"),AL.forEach(t),J8=n(ih," added a new option "),bd=l(ih,"CODE",{});var TL=r(bd);X8=n(TL,"round_robin_gradients"),TL.forEach(t),Q8=n(ih," which you can enable with:"),ih.forEach(t),Gy=c(e),d(Qo.$$.fragment,e),My=c(e),xi=l(e,"P",{});var SL=r(xi);ek=n(SL,"This is a stage 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. 
Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism)."),SL.forEach(t),Ly=c(e),Ri=l(e,"A",{id:!0}),r(Ri).forEach(t),Zy=c(e),Rs=l(e,"H4",{class:!0});var yq=r(Rs);oa=l(yq,"A",{id:!0,class:!0,href:!0});var CL=r(oa);qd=l(CL,"SPAN",{});var xL=r(qd);d(el.$$.fragment,xL),xL.forEach(t),CL.forEach(t),sk=c(yq),Ed=l(yq,"SPAN",{});var RL=r(Ed);tk=n(RL,"ZeRO-3 Config"),RL.forEach(t),yq.forEach(t),Ny=c(e),Ii=l(e,"P",{});var IL=r(Ii);ak=n(IL,"The following is an example of configuration for ZeRO stage 3:"),IL.forEach(t),Hy=c(e),d(sl.$$.fragment,e),By=c(e),V=l(e,"P",{});var ws=r(V);nk=n(ws,`If you are getting OOMs, because your model or activations don\u2019t fit into the GPU memory and you have unutilized CPU memory offloading the optimizer states and parameters to CPU memory with `),$d=l(ws,"CODE",{});var UL=r($d);ok=n(UL,'"device": "cpu"'),UL.forEach(t),lk=n(ws,` may solve this limitation. If you don\u2019t want to offload to CPU memory, use `),kd=l(ws,"CODE",{});var GL=r(kd);rk=n(GL,"none"),GL.forEach(t),pk=n(ws," instead of "),Pd=l(ws,"CODE",{});var ML=r(Pd);ik=n(ML,"cpu"),ML.forEach(t),uk=n(ws," for the "),zd=l(ws,"CODE",{});var LL=r(zd);ck=n(LL,"device"),LL.forEach(t),hk=n(ws,` entry. Offloading to NVMe is discussed further down.`),ws.forEach(t),Wy=c(e),Be=l(e,"P",{});var uh=r(Be);fk=n(uh,"Pinned memory is enabled with "),Dd=l(uh,"CODE",{});var ZL=r(Dd);dk=n(ZL,"pin_memory"),ZL.forEach(t),mk=n(uh," set to "),Od=l(uh,"CODE",{});var NL=r(Od);_k=n(NL,"true"),NL.forEach(t),vk=n(uh,`. This feature can improve the throughput at the cost of making less memory available to other processes. Pinned memory is set aside to the specific process that requested it and its typically accessed much faster than normal CPU memory.`),uh.forEach(t),Fy=c(e),Ui=l(e,"P",{});var HL=r(Ui);Ad=l(HL,"STRONG",{});var BL=r(Ad);jk=n(BL,"Performance tuning:"),BL.forEach(t),HL.forEach(t),Vy=c(e),la=l(e,"UL",{});var gq=r(la);tl=l(gq,"LI",{});var bq=r(tl);Td=l(bq,"CODE",{});var WL=r(Td);wk=n(WL,"stage3_max_live_parameters"),WL.forEach(t),yk=n(bq,": "),Sd=l(bq,"CODE",{});var FL=r(Sd);gk=n(FL,"1e9"),FL.forEach(t),bq.forEach(t),bk=c(gq),al=l(gq,"LI",{});var qq=r(al);Cd=l(qq,"CODE",{});var VL=r(Cd);qk=n(VL,"stage3_max_reuse_distance"),VL.forEach(t),Ek=n(qq,": "),xd=l(qq,"CODE",{});var YL=r(xd);$k=n(YL,"1e9"),YL.forEach(t),qq.forEach(t),gq.forEach(t),Yy=c(e),M=l(e,"P",{});var ge=r(M);kk=n(ge,"If hitting OOM reduce "),Rd=l(ge,"CODE",{});var KL=r(Rd);Pk=n(KL,"stage3_max_live_parameters"),KL.forEach(t),zk=n(ge," and "),Id=l(ge,"CODE",{});var JL=r(Id);Dk=n(JL,"stage3_max_reuse_distance"),JL.forEach(t),Ok=n(ge,`. They should have minimal impact on performance unless you are doing activation checkpointing. `),Ud=l(ge,"CODE",{});var XL=r(Ud);Ak=n(XL,"1e9"),XL.forEach(t),Tk=n(ge,` would consume ~2GB. The memory is shared by `),Gd=l(ge,"CODE",{});var QL=r(Gd);Sk=n(QL,"stage3_max_live_parameters"),QL.forEach(t),Ck=n(ge," and "),Md=l(ge,"CODE",{});var eZ=r(Md);xk=n(eZ,"stage3_max_reuse_distance"),eZ.forEach(t),Rk=n(ge,", so it\u2019s not additive, it\u2019s just 2GB total."),ge.forEach(t),Ky=c(e),ze=l(e,"P",{});var gp=r(ze);Ld=l(gp,"CODE",{});var sZ=r(Ld);Ik=n(sZ,"stage3_max_live_parameters"),sZ.forEach(t),Uk=n(gp,` is the upper limit on how many full parameters you want to keep on the GPU at any given time. 
\u201Creuse distance\u201D is a metric we are using to figure out when will a parameter be used again in the future, and we use the `),Zd=l(gp,"CODE",{});var tZ=r(Zd);Gk=n(tZ,"stage3_max_reuse_distance"),tZ.forEach(t),Mk=n(gp,` to decide whether to throw away the parameter or to keep it. If a parameter is going to be used again in near future (less than `),Nd=l(gp,"CODE",{});var aZ=r(Nd);Lk=n(aZ,"stage3_max_reuse_distance"),aZ.forEach(t),Zk=n(gp,`) then we keep it to reduce communication overhead. This is super helpful when you have activation checkpointing enabled, where we do a forward recompute and backward passes a a single layer granularity and want to keep the parameter in the forward recompute till the backward`),gp.forEach(t),Jy=c(e),Gi=l(e,"P",{});var nZ=r(Gi);Nk=n(nZ,"The following configuration values depend on the model\u2019s hidden size:"),nZ.forEach(t),Xy=c(e),We=l(e,"UL",{});var ch=r(We);nl=l(ch,"LI",{});var Eq=r(nl);Hd=l(Eq,"CODE",{});var oZ=r(Hd);Hk=n(oZ,"reduce_bucket_size"),oZ.forEach(t),Bk=n(Eq,": "),Bd=l(Eq,"CODE",{});var lZ=r(Bd);Wk=n(lZ,"hidden_size*hidden_size"),lZ.forEach(t),Eq.forEach(t),Fk=c(ch),ol=l(ch,"LI",{});var $q=r(ol);Wd=l($q,"CODE",{});var rZ=r(Wd);Vk=n(rZ,"stage3_prefetch_bucket_size"),rZ.forEach(t),Yk=n($q,": "),Fd=l($q,"CODE",{});var pZ=r(Fd);Kk=n(pZ,"0.9 * hidden_size * hidden_size"),pZ.forEach(t),$q.forEach(t),Jk=c(ch),ll=l(ch,"LI",{});var kq=r(ll);Vd=l(kq,"CODE",{});var iZ=r(Vd);Xk=n(iZ,"stage3_param_persistence_threshold"),iZ.forEach(t),Qk=n(kq,": "),Yd=l(kq,"CODE",{});var uZ=r(Yd);e7=n(uZ,"10 * hidden_size"),uZ.forEach(t),kq.forEach(t),ch.forEach(t),Qy=c(e),Fe=l(e,"P",{});var hh=r(Fe);s7=n(hh,"therefore set these values to "),Kd=l(hh,"CODE",{});var cZ=r(Kd);t7=n(cZ,"auto"),cZ.forEach(t),a7=n(hh," and the "),Mi=l(hh,"A",{href:!0});var hZ=r(Mi);n7=n(hZ,"Trainer"),hZ.forEach(t),o7=n(hh,` will automatically assign the recommended values. But, of course, feel free to set these explicitly as well.`),hh.forEach(t),eg=c(e),rl=l(e,"P",{});var _U=r(rl);Jd=l(_U,"CODE",{});var fZ=r(Jd);l7=n(fZ,"stage3_gather_16bit_weights_on_model_save"),fZ.forEach(t),r7=n(_U,` enables model fp16 weights consolidation when model gets saved. With large models and multiple GPUs this is an expensive operation both in terms of memory and speed. It\u2019s currently required if you plan to resume the training. Watch out for future updates that will remove this limitation and make things more flexible.`),_U.forEach(t),sg=c(e),le=l(e,"P",{});var In=r(le);p7=n(In,"If you\u2019re migrating from ZeRO-2 configuration note that "),Xd=l(In,"CODE",{});var dZ=r(Xd);i7=n(dZ,"allgather_partitions"),dZ.forEach(t),u7=n(In,", "),Qd=l(In,"CODE",{});var mZ=r(Qd);c7=n(mZ,"allgather_bucket_size"),mZ.forEach(t),h7=n(In,` and `),em=l(In,"CODE",{});var _Z=r(em);f7=n(_Z,"reduce_scatter"),_Z.forEach(t),d7=n(In,` configuration parameters are not used in ZeRO-3. If you keep these in the config file they will just be ignored.`),In.forEach(t),tg=c(e),Li=l(e,"UL",{});var vZ=r(Li);pl=l(vZ,"LI",{});var Pq=r(pl);sm=l(Pq,"CODE",{});var jZ=r(sm);m7=n(jZ,"sub_group_size"),jZ.forEach(t),_7=n(Pq,": "),tm=l(Pq,"CODE",{});var wZ=r(tm);v7=n(wZ,"1e9"),wZ.forEach(t),Pq.forEach(t),vZ.forEach(t),ag=c(e),De=l(e,"P",{});var bp=r(De);am=l(bp,"CODE",{});var yZ=r(am);j7=n(yZ,"sub_group_size"),yZ.forEach(t),w7=n(bp,` controls the granularity in which parameters are updated during optimizer steps. 
Parameters are grouped into buckets of `),nm=l(bp,"CODE",{});var gZ=r(nm);y7=n(gZ,"sub_group_size"),gZ.forEach(t),g7=n(bp,` and each buckets is updated one at a time. When used with NVMe offload in ZeRO-Infinity, `),om=l(bp,"CODE",{});var bZ=r(om);b7=n(bZ,"sub_group_size"),bZ.forEach(t),q7=n(bp,` therefore controls the granularity in which model states are moved in and out of CPU memory from NVMe during the optimizer step. This prevents running out of CPU memory for extremely large models.`),bp.forEach(t),ng=c(e),Ve=l(e,"P",{});var fh=r(Ve);E7=n(fh,"You can leave "),lm=l(fh,"CODE",{});var qZ=r(lm);$7=n(qZ,"sub_group_size"),qZ.forEach(t),k7=n(fh," to its default value of "),rm=l(fh,"EM",{});var EZ=r(rm);P7=n(EZ,"1e9"),EZ.forEach(t),z7=n(fh,` when not using NVMe offload. You may want to change its default value in the following cases:`),fh.forEach(t),og=c(e),ra=l(e,"OL",{});var zq=r(ra);il=l(zq,"LI",{});var Dq=r(il);D7=n(Dq,"Running into OOM during optimizer step: Reduce "),pm=l(Dq,"CODE",{});var $Z=r(pm);O7=n($Z,"sub_group_size"),$Z.forEach(t),A7=n(Dq," to reduce memory utilization of temporary buffers"),Dq.forEach(t),T7=c(zq),ul=l(zq,"LI",{});var Oq=r(ul);S7=n(Oq,"Optimizer Step is taking a long time: Increase "),im=l(Oq,"CODE",{});var kZ=r(im);C7=n(kZ,"sub_group_size"),kZ.forEach(t),x7=n(Oq,` to improve bandwidth utilization as a result of the increased data buffers.`),Oq.forEach(t),zq.forEach(t),lg=c(e),Zi=l(e,"A",{id:!0}),r(Zi).forEach(t),rg=c(e),Is=l(e,"H3",{class:!0});var Aq=r(Is);pa=l(Aq,"A",{id:!0,class:!0,href:!0});var PZ=r(pa);um=l(PZ,"SPAN",{});var zZ=r(um);d(cl.$$.fragment,zZ),zZ.forEach(t),PZ.forEach(t),R7=c(Aq),cm=l(Aq,"SPAN",{});var DZ=r(cm);I7=n(DZ,"NVMe Support"),DZ.forEach(t),Aq.forEach(t),pg=c(e),Ni=l(e,"P",{});var OZ=r(Ni);U7=n(OZ,`ZeRO-Infinity allows for training incredibly large models by extending GPU and CPU memory with NVMe memory. Thanks to smart partitioning and tiling algorithms each GPU needs to send and receive very small amounts of data during offloading so modern NVMe proved to be fit to allow for an even larger total memory pool available to your training process. ZeRO-Infinity requires ZeRO-3 enabled.`),OZ.forEach(t),ig=c(e),Hi=l(e,"P",{});var AZ=r(Hi);G7=n(AZ,"The following configuration example enables NVMe to offload both optimizer states and the params:"),AZ.forEach(t),ug=c(e),d(hl.$$.fragment,e),cg=c(e),ia=l(e,"P",{});var Tq=r(ia);M7=n(Tq,`You can choose to offload both optimizer states and params to NVMe, or just one of them or none. For example, if you have copious amounts of CPU memory available, by all means offload to CPU memory only as it\u2019d be faster (hint: `),hm=l(Tq,"EM",{});var TZ=r(hm);L7=n(TZ,"\u201Cdevice\u201D: \u201Ccpu\u201D"),TZ.forEach(t),Z7=n(Tq,")."),Tq.forEach(t),hg=c(e),Ye=l(e,"P",{});var dh=r(Ye);N7=n(dh,"Here is the full documentation for offloading "),fl=l(dh,"A",{href:!0,rel:!0});var SZ=r(fl);H7=n(SZ,"optimizer states"),SZ.forEach(t),B7=n(dh," and "),dl=l(dh,"A",{href:!0,rel:!0});var CZ=r(dl);W7=n(CZ,"parameters"),CZ.forEach(t),F7=n(dh,"."),dh.forEach(t),fg=c(e),ua=l(e,"P",{});var Sq=r(ua);V7=n(Sq,"Make sure that your "),fm=l(Sq,"CODE",{});var xZ=r(fm);Y7=n(xZ,"nvme_path"),xZ.forEach(t),K7=n(Sq,` is actually an NVMe, since it will work with the normal hard drive or SSD, but it\u2019ll be much much slower. 
The fast scalable training was designed with modern NVMe transfer speeds in mind (as of this writing one can have ~3.5GB/s read, ~3GB/s write peak speeds).`),Sq.forEach(t),dg=c(e),Ke=l(e,"P",{});var mh=r(Ke);J7=n(mh,"In order to figure out the optimal "),dm=l(mh,"CODE",{});var RZ=r(dm);X7=n(RZ,"aio"),RZ.forEach(t),Q7=n(mh,` configuration block you must run a benchmark on your target setup, as `),ml=l(mh,"A",{href:!0,rel:!0});var IZ=r(ml);eP=n(IZ,"explained here"),IZ.forEach(t),sP=n(mh,"."),mh.forEach(t),mg=c(e),Bi=l(e,"A",{id:!0}),r(Bi).forEach(t),_g=c(e),Us=l(e,"H4",{class:!0});var Cq=r(Us);ca=l(Cq,"A",{id:!0,class:!0,href:!0});var UZ=r(ca);mm=l(UZ,"SPAN",{});var GZ=r(mm);d(_l.$$.fragment,GZ),GZ.forEach(t),UZ.forEach(t),tP=c(Cq),_m=l(Cq,"SPAN",{});var MZ=r(_m);aP=n(MZ,"ZeRO-2 vs ZeRO-3 Performance"),MZ.forEach(t),Cq.forEach(t),vg=c(e),Wi=l(e,"P",{});var LZ=r(Wi);nP=n(LZ,`ZeRO-3 is likely to be slower than ZeRO-2 if everything else is configured the same because the former has to gather model weights in addition to what ZeRO-2 does. If ZeRO-2 meets your needs and you don\u2019t need to scale beyond a few GPUs then you may choose to stick to it. It\u2019s important to understand that ZeRO-3 enables a much higher scalability capacity at a cost of speed.`),LZ.forEach(t),jg=c(e),Fi=l(e,"P",{});var ZZ=r(Fi);oP=n(ZZ,"It\u2019s possible to adjust ZeRO-3 configuration to make it perform closer to ZeRO-2:"),ZZ.forEach(t),wg=c(e),ha=l(e,"UL",{});var xq=r(ha);Gs=l(xq,"LI",{});var _h=r(Gs);lP=n(_h,"set "),vm=l(_h,"CODE",{});var NZ=r(vm);rP=n(NZ,"stage3_param_persistence_threshold"),NZ.forEach(t),pP=n(_h," to a very large number - larger than the largest parameter, e.g., "),jm=l(_h,"CODE",{});var HZ=r(jm);iP=n(HZ,"6 * hidden_size * hidden_size"),HZ.forEach(t),uP=n(_h,". This will keep the parameters on the GPUs."),_h.forEach(t),cP=c(xq),vl=l(xq,"LI",{});var Rq=r(vl);hP=n(Rq,"turn off "),wm=l(Rq,"CODE",{});var BZ=r(wm);fP=n(BZ,"offload_params"),BZ.forEach(t),dP=n(Rq," since ZeRO-2 doesn\u2019t have that option."),Rq.forEach(t),xq.forEach(t),yg=c(e),Je=l(e,"P",{});var vh=r(Je);mP=n(vh,"The performance will likely improve significantly with just "),ym=l(vh,"CODE",{});var WZ=r(ym);_P=n(WZ,"offload_params"),WZ.forEach(t),vP=n(vh,` turned off, even if you don\u2019t change `),gm=l(vh,"CODE",{});var FZ=r(gm);jP=n(FZ,"stage3_param_persistence_threshold"),FZ.forEach(t),wP=n(vh,`. Of course, these changes will impact the size of the model you can train. So these help you to trade scalability for speed depending on your needs.`),vh.forEach(t),gg=c(e),Vi=l(e,"A",{id:!0}),r(Vi).forEach(t),bg=c(e),Ms=l(e,"H4",{class:!0});var Iq=r(Ms);fa=l(Iq,"A",{id:!0,class:!0,href:!0});var VZ=r(fa);bm=l(VZ,"SPAN",{});var YZ=r(bm);d(jl.$$.fragment,YZ),YZ.forEach(t),VZ.forEach(t),yP=c(Iq),qm=l(Iq,"SPAN",{});var KZ=r(qm);gP=n(KZ,"ZeRO-2 Example"),KZ.forEach(t),Iq.forEach(t),qg=c(e),da=l(e,"P",{});var Uq=r(da);bP=n(Uq,"Here is a full ZeRO-2 auto-configuration file "),Em=l(Uq,"CODE",{});var JZ=r(Em);qP=n(JZ,"ds_config_zero2.json"),JZ.forEach(t),EP=n(Uq,":"),Uq.forEach(t),Eg=c(e),d(wl.$$.fragment,e),$g=c(e),ma=l(e,"P",{});var Gq=r(ma);$P=n(Gq,`Here is a full ZeRO-2 all-enabled manually set configuration file. 
It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple `),$m=l(Gq,"CODE",{});var XZ=r($m);kP=n(XZ,"auto"),XZ.forEach(t),PP=n(Gq," settings in it."),Gq.forEach(t),kg=c(e),d(yl.$$.fragment,e),Pg=c(e),Yi=l(e,"A",{id:!0}),r(Yi).forEach(t),zg=c(e),Ls=l(e,"H4",{class:!0});var Mq=r(Ls);_a=l(Mq,"A",{id:!0,class:!0,href:!0});var QZ=r(_a);km=l(QZ,"SPAN",{});var eN=r(km);d(gl.$$.fragment,eN),eN.forEach(t),QZ.forEach(t),zP=c(Mq),Pm=l(Mq,"SPAN",{});var sN=r(Pm);DP=n(sN,"ZeRO-3 Example"),sN.forEach(t),Mq.forEach(t),Dg=c(e),va=l(e,"P",{});var Lq=r(va);OP=n(Lq,"Here is a full ZeRO-3 auto-configuration file "),zm=l(Lq,"CODE",{});var tN=r(zm);AP=n(tN,"ds_config_zero3.json"),tN.forEach(t),TP=n(Lq,":"),Lq.forEach(t),Og=c(e),d(bl.$$.fragment,e),Ag=c(e),ja=l(e,"P",{});var Zq=r(ja);SP=n(Zq,`Here is a full ZeRO-3 all-enabled manually set configuration file. It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple `),Dm=l(Zq,"CODE",{});var aN=r(Dm);CP=n(aN,"auto"),aN.forEach(t),xP=n(Zq," settings in it."),Zq.forEach(t),Tg=c(e),d(ql.$$.fragment,e),Sg=c(e),Zs=l(e,"H3",{class:!0});var Nq=r(Zs);wa=l(Nq,"A",{id:!0,class:!0,href:!0});var nN=r(wa);Om=l(nN,"SPAN",{});var oN=r(Om);d(El.$$.fragment,oN),oN.forEach(t),nN.forEach(t),RP=c(Nq),Am=l(Nq,"SPAN",{});var lN=r(Am);IP=n(lN,"Optimizer and Scheduler"),lN.forEach(t),Nq.forEach(t),Cg=c(e),ya=l(e,"P",{});var Hq=r(ya);UP=n(Hq,"As long as you don\u2019t enable "),Tm=l(Hq,"CODE",{});var rN=r(Tm);GP=n(rN,"offload_optimizer"),rN.forEach(t),MP=n(Hq,` you can mix and match DeepSpeed and HuggingFace schedulers and optimizers, with the exception of using the combination of HuggingFace scheduler and DeepSpeed optimizer:`),Hq.forEach(t),xg=c(e),Ki=l(e,"P",{});var pN=r(Ki);LP=n(pN,`| Combos | HF Scheduler | DS Scheduler | | HF Optimizer | Yes | Yes | | DS Optimizer | No | Yes |`),pN.forEach(t),Rg=c(e),ga=l(e,"P",{});var Bq=r(ga);ZP=n(Bq,"It is possible to use a non-DeepSpeed optimizer when "),Sm=l(Bq,"CODE",{});var iN=r(Sm);NP=n(iN,"offload_optimizer"),iN.forEach(t),HP=n(Bq,` is enabled, as long as it has both CPU and GPU implementation (except LAMB).`),Bq.forEach(t),Ig=c(e),Ji=l(e,"A",{id:!0}),r(Ji).forEach(t),Ug=c(e),Ns=l(e,"H4",{class:!0});var Wq=r(Ns);ba=l(Wq,"A",{id:!0,class:!0,href:!0});var uN=r(ba);Cm=l(uN,"SPAN",{});var cN=r(Cm);d($l.$$.fragment,cN),cN.forEach(t),uN.forEach(t),BP=c(Wq),xm=l(Wq,"SPAN",{});var hN=r(xm);WP=n(hN,"Optimizer"),hN.forEach(t),Wq.forEach(t),Gg=c(e),Xe=l(e,"P",{});var jh=r(Xe);FP=n(jh,`DeepSpeed\u2019s main optimizers are Adam, AdamW, OneBitAdam, and Lamb. These have been thoroughly tested with ZeRO and are thus recommended to be used. It, however, can import other optimizers from `),Rm=l(jh,"CODE",{});var fN=r(Rm);VP=n(fN,"torch"),fN.forEach(t),YP=n(jh,". 
The full documentation is "),kl=l(jh,"A",{href:!0,rel:!0});var dN=r(kl);KP=n(dN,"here"),dN.forEach(t),JP=n(jh,"."),jh.forEach(t),Mg=c(e),$=l(e,"P",{});var A=r($);XP=n(A,"If you don\u2019t configure the "),Im=l(A,"CODE",{});var mN=r(Im);QP=n(mN,"optimizer"),mN.forEach(t),ez=n(A," entry in the configuration file, the "),Xi=l(A,"A",{href:!0});var _N=r(Xi);sz=n(_N,"Trainer"),_N.forEach(t),tz=n(A,` will automatically set it to `),Um=l(A,"CODE",{});var vN=r(Um);az=n(vN,"AdamW"),vN.forEach(t),nz=n(A,` and will use the supplied values or the defaults for the following command line arguments: `),Gm=l(A,"CODE",{});var jN=r(Gm);oz=n(jN,"--learning_rate"),jN.forEach(t),lz=n(A,", "),Mm=l(A,"CODE",{});var wN=r(Mm);rz=n(wN,"--adam_beta1"),wN.forEach(t),pz=n(A,", "),Lm=l(A,"CODE",{});var yN=r(Lm);iz=n(yN,"--adam_beta2"),yN.forEach(t),uz=n(A,", "),Zm=l(A,"CODE",{});var gN=r(Zm);cz=n(gN,"--adam_epsilon"),gN.forEach(t),hz=n(A," and "),Nm=l(A,"CODE",{});var bN=r(Nm);fz=n(bN,"--weight_decay"),bN.forEach(t),dz=n(A,"."),A.forEach(t),Lg=c(e),Qe=l(e,"P",{});var wh=r(Qe);mz=n(wh,"Here is an example of the auto-configured "),Hm=l(wh,"CODE",{});var qN=r(Hm);_z=n(qN,"optimizer"),qN.forEach(t),vz=n(wh," entry for "),Bm=l(wh,"CODE",{});var EN=r(Bm);jz=n(EN,"AdamW"),EN.forEach(t),wz=n(wh,":"),wh.forEach(t),Zg=c(e),d(Pl.$$.fragment,e),Ng=c(e),Qi=l(e,"P",{});var $N=r(Qi);yz=n($N,`Note that the command line arguments will set the values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when for example, the learning rate is set to different values in different places. Command line rules. The values that get overridden are:`),$N.forEach(t),Hg=c(e),re=l(e,"UL",{});var Un=r(re);zl=l(Un,"LI",{});var Fq=r(zl);Wm=l(Fq,"CODE",{});var kN=r(Wm);gz=n(kN,"lr"),kN.forEach(t),bz=n(Fq," with the value of "),Fm=l(Fq,"CODE",{});var PN=r(Fm);qz=n(PN,"--learning_rate"),PN.forEach(t),Fq.forEach(t),Ez=c(Un),Dl=l(Un,"LI",{});var Vq=r(Dl);Vm=l(Vq,"CODE",{});var zN=r(Vm);$z=n(zN,"betas"),zN.forEach(t),kz=n(Vq," with the value of "),Ym=l(Vq,"CODE",{});var DN=r(Ym);Pz=n(DN,"--adam_beta1 --adam_beta2"),DN.forEach(t),Vq.forEach(t),zz=c(Un),Ol=l(Un,"LI",{});var Yq=r(Ol);Km=l(Yq,"CODE",{});var ON=r(Km);Dz=n(ON,"eps"),ON.forEach(t),Oz=n(Yq," with the value of "),Jm=l(Yq,"CODE",{});var AN=r(Jm);Az=n(AN,"--adam_epsilon"),AN.forEach(t),Yq.forEach(t),Tz=c(Un),Al=l(Un,"LI",{});var Kq=r(Al);Xm=l(Kq,"CODE",{});var TN=r(Xm);Sz=n(TN,"weight_decay"),TN.forEach(t),Cz=n(Kq," with the value of "),Qm=l(Kq,"CODE",{});var SN=r(Qm);xz=n(SN,"--weight_decay"),SN.forEach(t),Kq.forEach(t),Un.forEach(t),Bg=c(e),eu=l(e,"P",{});var CN=r(eu);Rz=n(CN,"Therefore please remember to tune the shared hyperparameters on the command line."),CN.forEach(t),Wg=c(e),su=l(e,"P",{});var xN=r(su);Iz=n(xN,"You can also set the values explicitly:"),xN.forEach(t),Fg=c(e),d(Tl.$$.fragment,e),Vg=c(e),qa=l(e,"P",{});var Jq=r(qa);Uz=n(Jq,"But then you\u2019re on your own synchronizing the "),tu=l(Jq,"A",{href:!0});var RN=r(tu);Gz=n(RN,"Trainer"),RN.forEach(t),Mz=n(Jq,` command line arguments and the DeepSpeed configuration.`),Jq.forEach(t),Yg=c(e),au=l(e,"P",{});var IN=r(au);Lz=n(IN,"If you want to use another optimizer which is not listed above, you will have to add to the top level configuration."),IN.forEach(t),Kg=c(e),d(Sl.$$.fragment,e),Jg=c(e),pe=l(e,"P",{});var Gn=r(pe);Zz=n(Gn,"Similarly to "),e_=l(Gn,"CODE",{});var UN=r(e_);Nz=n(UN,"AdamW"),UN.forEach(t),Hz=n(Gn,`, you can configure other officially supported optimizers. 
Just remember that may have different config values. e.g. for Adam you will want `),s_=l(Gn,"CODE",{});var GN=r(s_);Bz=n(GN,"weight_decay"),GN.forEach(t),Wz=n(Gn," around "),t_=l(Gn,"CODE",{});var MN=r(t_);Fz=n(MN,"0.01"),MN.forEach(t),Vz=n(Gn,"."),Gn.forEach(t),Xg=c(e),nu=l(e,"A",{id:!0}),r(nu).forEach(t),Qg=c(e),Hs=l(e,"H4",{class:!0});var Xq=r(Hs);Ea=l(Xq,"A",{id:!0,class:!0,href:!0});var LN=r(Ea);a_=l(LN,"SPAN",{});var ZN=r(a_);d(Cl.$$.fragment,ZN),ZN.forEach(t),LN.forEach(t),Yz=c(Xq),n_=l(Xq,"SPAN",{});var NN=r(n_);Kz=n(NN,"Scheduler"),NN.forEach(t),Xq.forEach(t),e2=c(e),L=l(e,"P",{});var be=r(L);Jz=n(be,"DeepSpeed supports "),o_=l(be,"CODE",{});var HN=r(o_);Xz=n(HN,"LRRangeTest"),HN.forEach(t),Qz=n(be,", "),l_=l(be,"CODE",{});var BN=r(l_);eD=n(BN,"OneCycle"),BN.forEach(t),sD=n(be,", "),r_=l(be,"CODE",{});var WN=r(r_);tD=n(WN,"WarmupLR"),WN.forEach(t),aD=n(be," and "),p_=l(be,"CODE",{});var FN=r(p_);nD=n(FN,"WarmupDecayLR"),FN.forEach(t),oD=n(be,` learning rate schedulers. The full documentation is `),xl=l(be,"A",{href:!0,rel:!0});var VN=r(xl);lD=n(VN,"here"),VN.forEach(t),rD=n(be,"."),be.forEach(t),s2=c(e),ou=l(e,"P",{});var YN=r(ou);pD=n(YN,"Here is where the schedulers overlap between \u{1F917} Transformers and DeepSpeed:"),YN.forEach(t),t2=c(e),$a=l(e,"UL",{});var Qq=r($a);Rl=l(Qq,"LI",{});var eE=r(Rl);i_=l(eE,"CODE",{});var KN=r(i_);iD=n(KN,"WarmupLR"),KN.forEach(t),uD=n(eE," via "),u_=l(eE,"CODE",{});var JN=r(u_);cD=n(JN,"--lr_scheduler_type constant_with_warmup"),JN.forEach(t),eE.forEach(t),hD=c(Qq),es=l(Qq,"LI",{});var qp=r(es);c_=l(qp,"CODE",{});var XN=r(c_);fD=n(XN,"WarmupDecayLR"),XN.forEach(t),dD=n(qp," via "),h_=l(qp,"CODE",{});var QN=r(h_);mD=n(QN,"--lr_scheduler_type linear"),QN.forEach(t),_D=n(qp,". This is also the default value for "),f_=l(qp,"CODE",{});var eH=r(f_);vD=n(eH,"--lr_scheduler_type"),eH.forEach(t),jD=n(qp,`, therefore, if you don\u2019t configure the scheduler this is scheduler that will get configured by default.`),qp.forEach(t),Qq.forEach(t),a2=c(e),D=l(e,"P",{});var K=r(D);wD=n(K,"If you don\u2019t configure the "),d_=l(K,"CODE",{});var sH=r(d_);yD=n(sH,"scheduler"),sH.forEach(t),gD=n(K," entry in the configuration file, the "),lu=l(K,"A",{href:!0});var tH=r(lu);bD=n(tH,"Trainer"),tH.forEach(t),qD=n(K,` will use the values of `),m_=l(K,"CODE",{});var aH=r(m_);ED=n(aH,"--lr_scheduler_type"),aH.forEach(t),$D=n(K,", "),__=l(K,"CODE",{});var nH=r(__);kD=n(nH,"--learning_rate"),nH.forEach(t),PD=n(K," and "),v_=l(K,"CODE",{});var oH=r(v_);zD=n(oH,"--warmup_steps"),oH.forEach(t),DD=n(K," or "),j_=l(K,"CODE",{});var lH=r(j_);OD=n(lH,"--warmup_ratio"),lH.forEach(t),AD=n(K,` to configure a \u{1F917} Transformers version of it.`),K.forEach(t),n2=c(e),ss=l(e,"P",{});var yh=r(ss);TD=n(yh,"Here is an example of the auto-configured "),w_=l(yh,"CODE",{});var rH=r(w_);SD=n(rH,"scheduler"),rH.forEach(t),CD=n(yh," entry for "),y_=l(yh,"CODE",{});var pH=r(y_);xD=n(pH,"WarmupLR"),pH.forEach(t),RD=n(yh,":"),yh.forEach(t),o2=c(e),d(Il.$$.fragment,e),l2=c(e),ts=l(e,"P",{});var gh=r(ts);ID=n(gh,"Since "),g_=l(gh,"EM",{});var iH=r(g_);UD=n(iH,"\u201Cauto\u201D"),iH.forEach(t),GD=n(gh," is used the "),ru=l(gh,"A",{href:!0});var uH=r(ru);MD=n(uH,"Trainer"),uH.forEach(t),LD=n(gh,` arguments will set the correct values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when, for example, the learning rate is set to different values in different places. Command line rules. 
The values that get set are:`),gh.forEach(t),r2=c(e),ie=l(e,"UL",{});var Mn=r(ie);ka=l(Mn,"LI",{});var vj=r(ka);b_=l(vj,"CODE",{});var cH=r(b_);ZD=n(cH,"warmup_min_lr"),cH.forEach(t),ND=n(vj," with the value of "),q_=l(vj,"CODE",{});var hH=r(q_);HD=n(hH,"0"),hH.forEach(t),BD=n(vj,"."),vj.forEach(t),WD=c(Mn),Pa=l(Mn,"LI",{});var jj=r(Pa);E_=l(jj,"CODE",{});var fH=r(E_);FD=n(fH,"warmup_max_lr"),fH.forEach(t),VD=n(jj," with the value of "),$_=l(jj,"CODE",{});var dH=r($_);YD=n(dH,"--learning_rate"),dH.forEach(t),KD=n(jj,"."),jj.forEach(t),JD=c(Mn),as=l(Mn,"LI",{});var Ep=r(as);k_=l(Ep,"CODE",{});var mH=r(k_);XD=n(mH,"warmup_num_steps"),mH.forEach(t),QD=n(Ep," with the value of "),P_=l(Ep,"CODE",{});var _H=r(P_);eO=n(_H,"--warmup_steps"),_H.forEach(t),sO=n(Ep," if provided. Otherwise will use "),z_=l(Ep,"CODE",{});var vH=r(z_);tO=n(vH,"--warmup_ratio"),vH.forEach(t),aO=n(Ep,` multiplied by the number of training steps and rounded up.`),Ep.forEach(t),nO=c(Mn),ns=l(Mn,"LI",{});var $p=r(ns);D_=l($p,"CODE",{});var jH=r(D_);oO=n(jH,"total_num_steps"),jH.forEach(t),lO=n($p," with either the value of "),O_=l($p,"CODE",{});var wH=r(O_);rO=n(wH,"--max_steps"),wH.forEach(t),pO=n($p,` or if it is not provided, derived automatically at run time based on the environment and the size of the dataset and other command line arguments (needed for `),A_=l($p,"CODE",{});var yH=r(A_);iO=n(yH,"WarmupDecayLR"),yH.forEach(t),uO=n($p,")."),$p.forEach(t),Mn.forEach(t),p2=c(e),pu=l(e,"P",{});var gH=r(pu);cO=n(gH,"You can, of course, take over any or all of the configuration values and set those yourself:"),gH.forEach(t),i2=c(e),d(Ul.$$.fragment,e),u2=c(e),za=l(e,"P",{});var sE=r(za);hO=n(sE,"But then you\u2019re on your own synchronizing the "),iu=l(sE,"A",{href:!0});var bH=r(iu);fO=n(bH,"Trainer"),bH.forEach(t),dO=n(sE,` command line arguments and the DeepSpeed configuration.`),sE.forEach(t),c2=c(e),Da=l(e,"P",{});var tE=r(Da);mO=n(tE,"For example, for "),T_=l(tE,"CODE",{});var qH=r(T_);_O=n(qH,"WarmupDecayLR"),qH.forEach(t),vO=n(tE,", you can use the following entry:"),tE.forEach(t),h2=c(e),d(Gl.$$.fragment,e),f2=c(e),Y=l(e,"P",{});var ys=r(Y);jO=n(ys,"and "),S_=l(ys,"CODE",{});var EH=r(S_);wO=n(EH,"total_num_steps"),EH.forEach(t),yO=n(ys,", "),C_=l(ys,"CODE",{});var $H=r(C_);gO=n($H,"warmup_max_lr"),$H.forEach(t),bO=n(ys,", "),x_=l(ys,"CODE",{});var kH=r(x_);qO=n(kH,"warmup_num_steps"),kH.forEach(t),EO=n(ys," and "),R_=l(ys,"CODE",{});var PH=r(R_);$O=n(PH,"total_num_steps"),PH.forEach(t),kO=n(ys," will be set at loading time."),ys.forEach(t),d2=c(e),uu=l(e,"A",{id:!0}),r(uu).forEach(t),m2=c(e),Bs=l(e,"H3",{class:!0});var aE=r(Bs);Oa=l(aE,"A",{id:!0,class:!0,href:!0});var zH=r(Oa);I_=l(zH,"SPAN",{});var DH=r(I_);d(Ml.$$.fragment,DH),DH.forEach(t),zH.forEach(t),PO=c(aE),U_=l(aE,"SPAN",{});var OH=r(U_);zO=n(OH,"fp32 Precision"),OH.forEach(t),aE.forEach(t),_2=c(e),cu=l(e,"P",{});var AH=r(cu);DO=n(AH,"Deepspeed supports the full fp32 and the fp16 mixed precision."),AH.forEach(t),v2=c(e),Aa=l(e,"P",{});var nE=r(Aa);OO=n(nE,`Because of the much reduced memory needs and faster speed one gets with the fp16 mixed precision, the only time you will want to not use it is when the model you\u2019re using doesn\u2019t behave well under this training mode. Typically this happens when the model wasn\u2019t pretrained in the fp16 mixed precision (e.g. often this happens with bf16-pretrained models). Such models may overflow or underflow leading to `),G_=l(nE,"CODE",{});var TH=r(G_);AO=n(TH,"NaN"),TH.forEach(t),TO=n(nE,` loss. 
If this is your case then you will want to use the full fp32 mode, by explicitly disabling the otherwise default fp16 mixed precision mode with:`),nE.forEach(t),j2=c(e),d(Ll.$$.fragment,e),w2=c(e),Ta=l(e,"P",{});var oE=r(Ta);SO=n(oE,`If you\u2019re using the Ampere-architecture based GPU, pytorch version 1.7 and higher will automatically switch to using the much more efficient tf32 format for some operations, but the results will still be in fp32. For details and benchmarks, please, see `),Zl=l(oE,"A",{href:!0,rel:!0});var SH=r(Zl);CO=n(SH,"TensorFloat-32(TF32) on Ampere devices"),SH.forEach(t),xO=n(oE,`. The document includes instructions on how to disable this automatic conversion if for some reason you prefer not to use it.`),oE.forEach(t),y2=c(e),ue=l(e,"P",{});var Ln=r(ue);RO=n(Ln,"With the \u{1F917} Trainer you can use "),M_=l(Ln,"CODE",{});var CH=r(M_);IO=n(CH,"--tf32"),CH.forEach(t),UO=n(Ln," to enable it, or disable it with "),L_=l(Ln,"CODE",{});var xH=r(L_);GO=n(xH,"--tf32 0"),xH.forEach(t),MO=n(Ln," or "),Z_=l(Ln,"CODE",{});var RH=r(Z_);LO=n(RH,"--no_tf32"),RH.forEach(t),ZO=n(Ln,". By default the PyTorch default is used."),Ln.forEach(t),g2=c(e),hu=l(e,"A",{id:!0}),r(hu).forEach(t),b2=c(e),Ws=l(e,"H3",{class:!0});var lE=r(Ws);Sa=l(lE,"A",{id:!0,class:!0,href:!0});var IH=r(Sa);N_=l(IH,"SPAN",{});var UH=r(N_);d(Nl.$$.fragment,UH),UH.forEach(t),IH.forEach(t),NO=c(lE),H_=l(lE,"SPAN",{});var GH=r(H_);HO=n(GH,"Automatic Mixed Precision"),GH.forEach(t),lE.forEach(t),q2=c(e),fu=l(e,"P",{});var MH=r(fu);BO=n(MH,"You can use automatic mixed precision with either a pytorch-like AMP way or the apex-like way:"),MH.forEach(t),E2=c(e),Fs=l(e,"H3",{class:!0});var rE=r(Fs);Ca=l(rE,"A",{id:!0,class:!0,href:!0});var LH=r(Ca);B_=l(LH,"SPAN",{});var ZH=r(B_);d(Hl.$$.fragment,ZH),ZH.forEach(t),LH.forEach(t),WO=c(rE),W_=l(rE,"SPAN",{});var NH=r(W_);FO=n(NH,"fp16"),NH.forEach(t),rE.forEach(t),$2=c(e),du=l(e,"P",{});var HH=r(du);VO=n(HH,"To configure pytorch AMP-like mode with fp16 (float16) set:"),HH.forEach(t),k2=c(e),d(Bl.$$.fragment,e),P2=c(e),os=l(e,"P",{});var bh=r(os);YO=n(bh,"and the "),mu=l(bh,"A",{href:!0});var BH=r(mu);KO=n(BH,"Trainer"),BH.forEach(t),JO=n(bh,` will automatically enable or disable it based on the value of `),F_=l(bh,"CODE",{});var WH=r(F_);XO=n(WH,"args.fp16_backend"),WH.forEach(t),QO=n(bh,". 
The rest of config values are up to you."),bh.forEach(t),z2=c(e),ls=l(e,"P",{});var qh=r(ls);eA=n(qh,"This mode gets enabled when "),V_=l(qh,"CODE",{});var FH=r(V_);sA=n(FH,"--fp16 --fp16_backend amp"),FH.forEach(t),tA=n(qh," or "),Y_=l(qh,"CODE",{});var VH=r(Y_);aA=n(VH,"--fp16_full_eval"),VH.forEach(t),nA=n(qh," command line args are passed."),qh.forEach(t),D2=c(e),_u=l(e,"P",{});var YH=r(_u);oA=n(YH,"You can also enable/disable this mode explicitly:"),YH.forEach(t),O2=c(e),d(Wl.$$.fragment,e),A2=c(e),xa=l(e,"P",{});var pE=r(xa);lA=n(pE,"But then you\u2019re on your own synchronizing the "),vu=l(pE,"A",{href:!0});var KH=r(vu);rA=n(KH,"Trainer"),KH.forEach(t),pA=n(pE,` command line arguments and the DeepSpeed configuration.`),pE.forEach(t),T2=c(e),Ra=l(e,"P",{});var iE=r(Ra);iA=n(iE,"Here is the "),Fl=l(iE,"A",{href:!0,rel:!0});var JH=r(Fl);uA=n(JH,"documentation"),JH.forEach(t),cA=n(iE,"."),iE.forEach(t),S2=c(e),Vs=l(e,"H3",{class:!0});var uE=r(Vs);Ia=l(uE,"A",{id:!0,class:!0,href:!0});var XH=r(Ia);K_=l(XH,"SPAN",{});var QH=r(K_);d(Vl.$$.fragment,QH),QH.forEach(t),XH.forEach(t),hA=c(uE),J_=l(uE,"SPAN",{});var eB=r(J_);fA=n(eB,"bf16"),eB.forEach(t),uE.forEach(t),C2=c(e),ju=l(e,"P",{});var sB=r(ju);dA=n(sB,"If bf16 (bfloat16) is desired instead of fp16 then the following configuration section is to be used:"),sB.forEach(t),x2=c(e),d(Yl.$$.fragment,e),R2=c(e),wu=l(e,"P",{});var tB=r(wu);mA=n(tB,"bf16 has the same dynamic range as fp32 and thus doesn\u2019t require loss scaling."),tB.forEach(t),I2=c(e),rs=l(e,"P",{});var Eh=r(rs);_A=n(Eh,"This mode gets enabled when "),X_=l(Eh,"CODE",{});var aB=r(X_);vA=n(aB,"--bf16"),aB.forEach(t),jA=n(Eh," or "),Q_=l(Eh,"CODE",{});var nB=r(Q_);wA=n(nB,"--bf16_full_eval"),nB.forEach(t),yA=n(Eh," command line args are passed."),Eh.forEach(t),U2=c(e),yu=l(e,"P",{});var oB=r(yu);gA=n(oB,"You can also enable/disable this mode explicitly:"),oB.forEach(t),G2=c(e),d(Kl.$$.fragment,e),M2=c(e),d(Ua.$$.fragment,e),L2=c(e),Ys=l(e,"H3",{class:!0});var cE=r(Ys);Ga=l(cE,"A",{id:!0,class:!0,href:!0});var lB=r(Ga);e1=l(lB,"SPAN",{});var rB=r(e1);d(Jl.$$.fragment,rB),rB.forEach(t),lB.forEach(t),bA=c(cE),s1=l(cE,"SPAN",{});var pB=r(s1);qA=n(pB,"apex"),pB.forEach(t),cE.forEach(t),Z2=c(e),gu=l(e,"P",{});var iB=r(gu);EA=n(iB,"To configure apex AMP-like mode set:"),iB.forEach(t),N2=c(e),d(Xl.$$.fragment,e),H2=c(e),ce=l(e,"P",{});var Zn=r(ce);$A=n(Zn,"and the "),bu=l(Zn,"A",{href:!0});var uB=r(bu);kA=n(uB,"Trainer"),uB.forEach(t),PA=n(Zn," will automatically configure it based on the values of "),t1=l(Zn,"CODE",{});var cB=r(t1);zA=n(cB,"args.fp16_backend"),cB.forEach(t),DA=n(Zn,` and `),a1=l(Zn,"CODE",{});var hB=r(a1);OA=n(hB,"args.fp16_opt_level"),hB.forEach(t),AA=n(Zn,"."),Zn.forEach(t),B2=c(e),Ma=l(e,"P",{});var hE=r(Ma);TA=n(hE,"This mode gets enabled when "),n1=l(hE,"CODE",{});var fB=r(n1);SA=n(fB,"--fp16 --fp16_backend apex --fp16_opt_level 01"),fB.forEach(t),CA=n(hE," command line args are passed."),hE.forEach(t),W2=c(e),qu=l(e,"P",{});var dB=r(qu);xA=n(dB,"You can also configure this mode explicitly:"),dB.forEach(t),F2=c(e),d(Ql.$$.fragment,e),V2=c(e),La=l(e,"P",{});var fE=r(La);RA=n(fE,"But then you\u2019re on your own synchronizing the "),Eu=l(fE,"A",{href:!0});var mB=r(Eu);IA=n(mB,"Trainer"),mB.forEach(t),UA=n(fE,` command line arguments and the DeepSpeed configuration.`),fE.forEach(t),Y2=c(e),Za=l(e,"P",{});var dE=r(Za);GA=n(dE,"Here is the "),er=l(dE,"A",{href:!0,rel:!0});var 
_B=r(er);MA=n(_B,"documentation"),_B.forEach(t),LA=n(dE,"."),dE.forEach(t),K2=c(e),$u=l(e,"A",{id:!0}),r($u).forEach(t),J2=c(e),Ks=l(e,"H3",{class:!0});var mE=r(Ks);Na=l(mE,"A",{id:!0,class:!0,href:!0});var vB=r(Na);o1=l(vB,"SPAN",{});var jB=r(o1);d(sr.$$.fragment,jB),jB.forEach(t),vB.forEach(t),ZA=c(mE),l1=l(mE,"SPAN",{});var wB=r(l1);NA=n(wB,"Batch Size"),wB.forEach(t),mE.forEach(t),X2=c(e),ku=l(e,"P",{});var yB=r(ku);HA=n(yB,"To configure batch size, use:"),yB.forEach(t),Q2=c(e),d(tr.$$.fragment,e),eb=c(e),Z=l(e,"P",{});var qe=r(Z);BA=n(qe,"and the "),Pu=l(qe,"A",{href:!0});var gB=r(Pu);WA=n(gB,"Trainer"),gB.forEach(t),FA=n(qe," will automatically set "),r1=l(qe,"CODE",{});var bB=r(r1);VA=n(bB,"train_micro_batch_size_per_gpu"),bB.forEach(t),YA=n(qe,` to the value of `),p1=l(qe,"CODE",{});var qB=r(p1);KA=n(qB,"args.per_device_train_batch_size"),qB.forEach(t),JA=n(qe," and "),i1=l(qe,"CODE",{});var EB=r(i1);XA=n(EB,"train_batch_size"),EB.forEach(t),QA=n(qe," to "),u1=l(qe,"CODE",{});var $B=r(u1);eT=n($B,"args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps"),$B.forEach(t),sT=n(qe,"."),qe.forEach(t),sb=c(e),zu=l(e,"P",{});var kB=r(zu);tT=n(kB,"You can also set the values explicitly:"),kB.forEach(t),tb=c(e),d(ar.$$.fragment,e),ab=c(e),Ha=l(e,"P",{});var _E=r(Ha);aT=n(_E,"But then you\u2019re on your own synchronizing the "),Du=l(_E,"A",{href:!0});var PB=r(Du);nT=n(PB,"Trainer"),PB.forEach(t),oT=n(_E,` command line arguments and the DeepSpeed configuration.`),_E.forEach(t),nb=c(e),Ou=l(e,"A",{id:!0}),r(Ou).forEach(t),ob=c(e),Js=l(e,"H3",{class:!0});var vE=r(Js);Ba=l(vE,"A",{id:!0,class:!0,href:!0});var zB=r(Ba);c1=l(zB,"SPAN",{});var DB=r(c1);d(nr.$$.fragment,DB),DB.forEach(t),zB.forEach(t),lT=c(vE),h1=l(vE,"SPAN",{});var OB=r(h1);rT=n(OB,"Gradient Accumulation"),OB.forEach(t),vE.forEach(t),lb=c(e),Au=l(e,"P",{});var AB=r(Au);pT=n(AB,"To configure gradient accumulation set:"),AB.forEach(t),rb=c(e),d(or.$$.fragment,e),pb=c(e),ps=l(e,"P",{});var $h=r(ps);iT=n($h,"and the "),Tu=l($h,"A",{href:!0});var TB=r(Tu);uT=n(TB,"Trainer"),TB.forEach(t),cT=n($h," will automatically set it to the value of "),f1=l($h,"CODE",{});var SB=r(f1);hT=n(SB,"args.gradient_accumulation_steps"),SB.forEach(t),fT=n($h,"."),$h.forEach(t),ib=c(e),Su=l(e,"P",{});var CB=r(Su);dT=n(CB,"You can also set the value explicitly:"),CB.forEach(t),ub=c(e),d(lr.$$.fragment,e),cb=c(e),Wa=l(e,"P",{});var jE=r(Wa);mT=n(jE,"But then you\u2019re on your own synchronizing the "),Cu=l(jE,"A",{href:!0});var xB=r(Cu);_T=n(xB,"Trainer"),xB.forEach(t),vT=n(jE,` command line arguments and the DeepSpeed configuration.`),jE.forEach(t),hb=c(e),xu=l(e,"A",{id:!0}),r(xu).forEach(t),fb=c(e),Xs=l(e,"H3",{class:!0});var wE=r(Xs);Fa=l(wE,"A",{id:!0,class:!0,href:!0});var RB=r(Fa);d1=l(RB,"SPAN",{});var IB=r(d1);d(rr.$$.fragment,IB),IB.forEach(t),RB.forEach(t),jT=c(wE),m1=l(wE,"SPAN",{});var UB=r(m1);wT=n(UB,"Gradient Clipping"),UB.forEach(t),wE.forEach(t),db=c(e),Ru=l(e,"P",{});var GB=r(Ru);yT=n(GB,"To configure gradient gradient clipping set:"),GB.forEach(t),mb=c(e),d(pr.$$.fragment,e),_b=c(e),is=l(e,"P",{});var kh=r(is);gT=n(kh,"and the "),Iu=l(kh,"A",{href:!0});var MB=r(Iu);bT=n(MB,"Trainer"),MB.forEach(t),qT=n(kh," will automatically set it to the value of "),_1=l(kh,"CODE",{});var LB=r(_1);ET=n(LB,"args.max_grad_norm"),LB.forEach(t),$T=n(kh,"."),kh.forEach(t),vb=c(e),Uu=l(e,"P",{});var ZB=r(Uu);kT=n(ZB,"You can also set the value explicitly:"),ZB.forEach(t),jb=c(e),d(ir.$$.fragment,e),wb=c(e),Va=l(e,"P",{});var 
yE=r(Va);PT=n(yE,"But then you\u2019re on your own synchronizing the "),Gu=l(yE,"A",{href:!0});var NB=r(Gu);zT=n(NB,"Trainer"),NB.forEach(t),DT=n(yE,` command line arguments and the DeepSpeed configuration.`),yE.forEach(t),yb=c(e),Mu=l(e,"A",{id:!0}),r(Mu).forEach(t),gb=c(e),Qs=l(e,"H3",{class:!0});var gE=r(Qs);Ya=l(gE,"A",{id:!0,class:!0,href:!0});var HB=r(Ya);v1=l(HB,"SPAN",{});var BB=r(v1);d(ur.$$.fragment,BB),BB.forEach(t),HB.forEach(t),OT=c(gE),j1=l(gE,"SPAN",{});var WB=r(j1);AT=n(WB,"Getting The Model Weights Out"),WB.forEach(t),gE.forEach(t),bb=c(e),Ka=l(e,"P",{});var bE=r(Ka);TT=n(bE,`As long as you continue training and resuming using DeepSpeed you don\u2019t need to worry about anything. DeepSpeed stores fp32 master weights in its custom checkpoint optimizer files, which are `),w1=l(bE,"CODE",{});var FB=r(w1);ST=n(FB,"global_step*/*optim_states.pt"),FB.forEach(t),CT=n(bE,` (this is glob pattern), and are saved under the normal checkpoint.`),bE.forEach(t),qb=c(e),Lu=l(e,"P",{});var VB=r(Lu);y1=l(VB,"STRONG",{});var YB=r(y1);xT=n(YB,"FP16 Weights:"),YB.forEach(t),VB.forEach(t),Eb=c(e),Ja=l(e,"P",{});var qE=r(Ja);RT=n(qE,"When a model is saved under ZeRO-2, you end up having the normal "),g1=l(qE,"CODE",{});var KB=r(g1);IT=n(KB,"pytorch_model.bin"),KB.forEach(t),UT=n(qE,` file with the model weights, but they are only the fp16 version of the weights.`),qE.forEach(t),$b=c(e),O=l(e,"P",{});var J=r(O);GT=n(J,`Under ZeRO-3, things are much more complicated, since the model weights are partitioned out over multiple GPUs, therefore `),b1=l(J,"CODE",{});var JB=r(b1);MT=n(JB,'"stage3_gather_16bit_weights_on_model_save": true'),JB.forEach(t),LT=n(J," is required to get the "),q1=l(J,"CODE",{});var XB=r(q1);ZT=n(XB,"Trainer"),XB.forEach(t),NT=n(J,` to save the fp16 version of the weights. If this setting is `),E1=l(J,"CODE",{});var QB=r(E1);HT=n(QB,"False"),QB.forEach(t),BT=c(J),$1=l(J,"CODE",{});var eW=r($1);WT=n(eW,"pytorch_model.bin"),eW.forEach(t),FT=n(J," won\u2019t be created. This is because by default DeepSpeed\u2019s "),k1=l(J,"CODE",{});var sW=r(k1);VT=n(sW,"state_dict"),sW.forEach(t),YT=n(J," contains a placeholder and not the real weights. If we were to save this "),P1=l(J,"CODE",{});var tW=r(P1);KT=n(tW,"state_dict"),tW.forEach(t),JT=n(J," it won\u2019t be possible to load it back."),J.forEach(t),kb=c(e),d(cr.$$.fragment,e),Pb=c(e),Zu=l(e,"P",{});var aW=r(Zu);z1=l(aW,"STRONG",{});var nW=r(z1);XT=n(nW,"FP32 Weights:"),nW.forEach(t),aW.forEach(t),zb=c(e),Xa=l(e,"P",{});var EE=r(Xa);QT=n(EE,`While the fp16 weights are fine for resuming training, if you finished finetuning your model and want to upload it to the `),hr=l(EE,"A",{href:!0,rel:!0});var oW=r(hr);eS=n(oW,"models hub"),oW.forEach(t),sS=n(EE,` or pass it to someone else you most likely will want to get the fp32 weights. This ideally shouldn\u2019t be done during training since this is a process that requires a lot of memory, and therefore best to be performed offline after the training is complete. But if desired and you have plenty of free CPU memory it can be done in the same training script. 
The following sections will discuss both approaches.`),EE.forEach(t),Db=c(e),Nu=l(e,"P",{});var lW=r(Nu);D1=l(lW,"STRONG",{});var rW=r(D1);tS=n(rW,"Live FP32 Weights Recovery:"),rW.forEach(t),lW.forEach(t),Ob=c(e),Hu=l(e,"P",{});var pW=r(Hu);aS=n(pW,"This approach may not work if you model is large and you have little free CPU memory left, at the end of the training."),pW.forEach(t),Ab=c(e),Bu=l(e,"P",{});var iW=r(Bu);nS=n(iW,"If you have saved at least one checkpoint, and you want to use the latest one, you can do the following:"),iW.forEach(t),Tb=c(e),d(fr.$$.fragment,e),Sb=c(e),us=l(e,"P",{});var Ph=r(us);oS=n(Ph,"If you\u2019re using the "),O1=l(Ph,"CODE",{});var uW=r(O1);lS=n(uW,"--load_best_model_at_end"),uW.forEach(t),rS=n(Ph," class:"),A1=l(Ph,"EM",{});var cW=r(A1);pS=n(cW,"~transformers.TrainingArguments"),cW.forEach(t),iS=n(Ph,` argument (to track the best checkpoint), then you can finish the training by first saving the final model explicitly and then do the same as above:`),Ph.forEach(t),Cb=c(e),d(dr.$$.fragment,e),xb=c(e),d(Qa.$$.fragment,e),Rb=c(e),en=l(e,"P",{});var $E=r(en);uS=n($E,"Of course, you don\u2019t have to use class:"),T1=l($E,"EM",{});var hW=r(T1);cS=n(hW,"~transformers.Trainer"),hW.forEach(t),hS=n($E,` and you can adjust the examples above to your own trainer.`),$E.forEach(t),Ib=c(e),sn=l(e,"P",{});var kE=r(sn);fS=n(kE,"If for some reason you want more refinement, you can also extract the fp32 "),S1=l(kE,"CODE",{});var fW=r(S1);dS=n(fW,"state_dict"),fW.forEach(t),mS=n(kE,` of the weights and apply these yourself as is shown in the following example:`),kE.forEach(t),Ub=c(e),d(mr.$$.fragment,e),Gb=c(e),Wu=l(e,"P",{});var dW=r(Wu);C1=l(dW,"STRONG",{});var mW=r(C1);_S=n(mW,"Offline FP32 Weights Recovery:"),mW.forEach(t),dW.forEach(t),Mb=c(e),cs=l(e,"P",{});var zh=r(cs);vS=n(zh,"DeepSpeed creates a special conversion script "),x1=l(zh,"CODE",{});var _W=r(x1);jS=n(_W,"zero_to_fp32.py"),_W.forEach(t),wS=n(zh,` which it places in the top-level of the checkpoint folder. Using this script you can extract the weights at any point. The script is standalone and you no longer need to have the configuration file or a `),R1=l(zh,"CODE",{});var vW=r(R1);yS=n(vW,"Trainer"),vW.forEach(t),gS=n(zh," to do the extraction."),zh.forEach(t),Lb=c(e),Fu=l(e,"P",{});var jW=r(Fu);bS=n(jW,"Let\u2019s say your checkpoint folder looks like this:"),jW.forEach(t),Zb=c(e),d(_r.$$.fragment,e),Nb=c(e),tn=l(e,"P",{});var PE=r(tn);qS=n(PE,"In this example there is just one DeepSpeed checkpoint sub-folder "),I1=l(PE,"EM",{});var wW=r(I1);ES=n(wW,"global_step1"),wW.forEach(t),$S=n(PE,`. Therefore to reconstruct the fp32 weights just run:`),PE.forEach(t),Hb=c(e),d(vr.$$.fragment,e),Bb=c(e),an=l(e,"P",{});var zE=r(an);kS=n(zE,"This is it. 
"),U1=l(zE,"CODE",{});var yW=r(U1);PS=n(yW,"pytorch_model.bin"),yW.forEach(t),zS=n(zE," will now contain the full fp32 model weights consolidated from multiple GPUs."),zE.forEach(t),Wb=c(e),Vu=l(e,"P",{});var gW=r(Vu);DS=n(gW,"The script will automatically be able to handle either a ZeRO-2 or ZeRO-3 checkpoint."),gW.forEach(t),Fb=c(e),jr=l(e,"P",{});var vU=r(jr);G1=l(vU,"CODE",{});var bW=r(G1);OS=n(bW,"python zero_to_fp32.py -h"),bW.forEach(t),AS=n(vU," will give you usage details."),vU.forEach(t),Vb=c(e),hs=l(e,"P",{});var Dh=r(hs);TS=n(Dh,"The script will auto-discover the deepspeed sub-folder using the contents of the file "),M1=l(Dh,"CODE",{});var qW=r(M1);SS=n(qW,"latest"),qW.forEach(t),CS=n(Dh,`, which in the current example will contain `),L1=l(Dh,"CODE",{});var EW=r(L1);xS=n(EW,"global_step1"),EW.forEach(t),RS=n(Dh,"."),Dh.forEach(t),Yb=c(e),Yu=l(e,"P",{});var $W=r(Yu);IS=n($W,"Note: currently the script requires 2x general RAM of the final fp32 model weights."),$W.forEach(t),Kb=c(e),et=l(e,"H3",{class:!0});var DE=r(et);nn=l(DE,"A",{id:!0,class:!0,href:!0});var kW=r(nn);Z1=l(kW,"SPAN",{});var PW=r(Z1);d(wr.$$.fragment,PW),PW.forEach(t),kW.forEach(t),US=c(DE),N1=l(DE,"SPAN",{});var zW=r(N1);GS=n(zW,"ZeRO-3 and Infinity Nuances"),zW.forEach(t),DE.forEach(t),Jb=c(e),Ku=l(e,"P",{});var DW=r(Ku);MS=n(DW,"ZeRO-3 is quite different from ZeRO-2 because of its param sharding feature."),DW.forEach(t),Xb=c(e),Ju=l(e,"P",{});var OW=r(Ju);LS=n(OW,"ZeRO-Infinity further extends ZeRO-3 to support NVMe memory and multiple other speed and scalability improvements."),OW.forEach(t),Qb=c(e),Xu=l(e,"P",{});var AW=r(Xu);ZS=n(AW,`While all the efforts were made for things to just work without needing any special changes to your models, in certain circumstances you may find the following information to be needed.`),AW.forEach(t),e3=c(e),st=l(e,"H4",{class:!0});var OE=r(st);on=l(OE,"A",{id:!0,class:!0,href:!0});var TW=r(on);H1=l(TW,"SPAN",{});var SW=r(H1);d(yr.$$.fragment,SW),SW.forEach(t),TW.forEach(t),NS=c(OE),B1=l(OE,"SPAN",{});var CW=r(B1);HS=n(CW,"Constructing Massive Models"),CW.forEach(t),OE.forEach(t),s3=c(e),ln=l(e,"P",{});var AE=r(ln);BS=n(AE,`DeepSpeed/ZeRO-3 can handle models with Trillions of parameters which may not fit onto the existing RAM. In such cases, but also if you want the initialization to happen much faster, initialize the model using `),W1=l(AE,"EM",{});var xW=r(W1);WS=n(xW,"deepspeed.zero.Init()"),xW.forEach(t),FS=n(AE,` context manager (which is also a function decorator), like so:`),AE.forEach(t),t3=c(e),d(gr.$$.fragment,e),a3=c(e),Qu=l(e,"P",{});var RW=r(Qu);VS=n(RW,"As you can see this gives you a randomly initialized model."),RW.forEach(t),n3=c(e),P=l(e,"P",{});var B=r(P);YS=n(B,"If you want to use a pretrained model, "),F1=l(B,"CODE",{});var IW=r(F1);KS=n(IW,"model_class.from_pretrained"),IW.forEach(t),JS=n(B,` will activate this feature as long as `),V1=l(B,"CODE",{});var UW=r(V1);XS=n(UW,"is_deepspeed_zero3_enabled()"),UW.forEach(t),QS=n(B," returns "),Y1=l(B,"CODE",{});var GW=r(Y1);eC=n(GW,"True"),GW.forEach(t),sC=n(B,`, which currently is setup by the `),ec=l(B,"A",{href:!0});var MW=r(ec);tC=n(MW,"TrainingArguments"),MW.forEach(t),aC=n(B,` object if the passed DeepSpeed configuration file contains ZeRO-3 config section. 
Thus you must create the `),sc=l(B,"A",{href:!0});var LW=r(sc);nC=n(LW,"TrainingArguments"),LW.forEach(t),oC=n(B," object "),K1=l(B,"STRONG",{});var ZW=r(K1);lC=n(ZW,"before"),ZW.forEach(t),rC=n(B,` calling `),J1=l(B,"CODE",{});var NW=r(J1);pC=n(NW,"from_pretrained"),NW.forEach(t),iC=n(B,". Here is an example of a possible sequence:"),B.forEach(t),o3=c(e),d(br.$$.fragment,e),l3=c(e),rn=l(e,"P",{});var TE=r(rn);uC=n(TE,"If you\u2019re using the official example scripts and your command line arguments include "),X1=l(TE,"CODE",{});var HW=r(X1);cC=n(HW,"--deepspeed ds_config.json"),HW.forEach(t),hC=n(TE,` with ZeRO-3 config enabled, then everything is already done for you, since this is how example scripts are written.`),TE.forEach(t),r3=c(e),tc=l(e,"P",{});var BW=r(tc);fC=n(BW,"Note: If the fp16 weights of the model can\u2019t fit onto the memory of a single GPU this feature must be used."),BW.forEach(t),p3=c(e),pn=l(e,"P",{});var SE=r(pn);dC=n(SE,"For full details on this method and other related features please refer to "),qr=l(SE,"A",{href:!0,rel:!0});var WW=r(qr);mC=n(WW,"Constructing Massive Models"),WW.forEach(t),_C=n(SE,"."),SE.forEach(t),i3=c(e),he=l(e,"P",{});var Nn=r(he);vC=n(Nn,"Also when loading fp16-pretrained models, you will want to tell "),Q1=l(Nn,"CODE",{});var FW=r(Q1);jC=n(FW,"from_pretrained"),FW.forEach(t),wC=n(Nn,` to use `),ev=l(Nn,"CODE",{});var VW=r(ev);yC=n(VW,"torch_dtype=torch.float16"),VW.forEach(t),gC=n(Nn,". For details, please, see "),ac=l(Nn,"A",{href:!0});var YW=r(ac);bC=n(YW,"from_pretrained-torch-dtype"),YW.forEach(t),qC=n(Nn,"."),Nn.forEach(t),u3=c(e),tt=l(e,"H4",{class:!0});var CE=r(tt);un=l(CE,"A",{id:!0,class:!0,href:!0});var KW=r(un);sv=l(KW,"SPAN",{});var JW=r(sv);d(Er.$$.fragment,JW),JW.forEach(t),KW.forEach(t),EC=c(CE),tv=l(CE,"SPAN",{});var XW=r(tv);$C=n(XW,"Gathering Parameters"),XW.forEach(t),CE.forEach(t),c3=c(e),$r=l(e,"P",{});var jU=r($r);kC=n(jU,`Under ZeRO-3 on multiple GPUs no single GPU has all the parameters unless it\u2019s the parameters for the currently executing layer. So if you need to access all parameters from all layers at once there is a specific method to do it. Most likely you won\u2019t need it, but if you do please refer to `),kr=l(jU,"A",{href:!0,rel:!0});var QW=r(kr);PC=n(QW,"Gathering Parameters"),QW.forEach(t),jU.forEach(t),h3=c(e),cn=l(e,"P",{});var xE=r(cn);zC=n(xE,`We do however use it internally in several places, one such example is when loading pretrained model weights in `),av=l(xE,"CODE",{});var eF=r(av);DC=n(eF,"from_pretrained"),eF.forEach(t),OC=n(xE,`. 
We load one layer at a time and immediately partition it to all participating GPUs, as for very large models it won\u2019t be possible to load it on one GPU and then spread it out to multiple GPUs, due to memory limitations.`),xE.forEach(t),f3=c(e),nc=l(e,"P",{});var sF=r(nc);AC=n(sF,"Also under ZeRO-3, if you write your own code and run into a model parameter weight that looks like:"),sF.forEach(t),d3=c(e),d(Pr.$$.fragment,e),m3=c(e),fs=l(e,"P",{});var Oh=r(fs);TC=n(Oh,"stress on "),nv=l(Oh,"CODE",{});var tF=r(nv);SC=n(tF,"tensor([1.])"),tF.forEach(t),CC=n(Oh,", or if you get an error where it says the parameter is of size "),ov=l(Oh,"CODE",{});var aF=r(ov);xC=n(aF,"1"),aF.forEach(t),RC=n(Oh,`, instead of some much larger multi-dimensional shape, this means that the parameter is partitioned and what you see is a ZeRO-3 placeholder.`),Oh.forEach(t),_3=c(e),oc=l(e,"A",{id:!0}),r(oc).forEach(t),v3=c(e),at=l(e,"H3",{class:!0});var RE=r(at);hn=l(RE,"A",{id:!0,class:!0,href:!0});var nF=r(hn);lv=l(nF,"SPAN",{});var oF=r(lv);d(zr.$$.fragment,oF),oF.forEach(t),nF.forEach(t),IC=c(RE),rv=l(RE,"SPAN",{});var lF=r(rv);UC=n(lF,"ZeRO Inference"),lF.forEach(t),RE.forEach(t),j3=c(e),lc=l(e,"P",{});var rF=r(lc);GC=n(rF,`ZeRO Inference uses the same config as ZeRO-3 Training. You just don\u2019t need the optimizer and scheduler sections. In fact you can leave these in the config file if you want to share the same one with the training. They will just be ignored.`),rF.forEach(t),w3=c(e),fn=l(e,"P",{});var IE=r(fn);MC=n(IE,"Otherwise you just need to pass the usual "),rc=l(IE,"A",{href:!0});var pF=r(rc);LC=n(pF,"TrainingArguments"),pF.forEach(t),ZC=n(IE," arguments. For example:"),IE.forEach(t),y3=c(e),d(Dr.$$.fragment,e),g3=c(e),pc=l(e,"P",{});var iF=r(pc);NC=n(iF,`The only important thing is that you need to use a ZeRO-3 configuration, since ZeRO-2 provides no benefit whatsoever for the inference as only ZeRO-3 performs sharding of parameters, whereas ZeRO-1 shards gradients and optimizer states.`),iF.forEach(t),b3=c(e),dn=l(e,"P",{});var UE=r(dn);HC=n(UE,"Here is an example of running "),pv=l(UE,"CODE",{});var uF=r(pv);BC=n(uF,"run_translation.py"),uF.forEach(t),WC=n(UE," under DeepSpeed deploying all available GPUs:"),UE.forEach(t),q3=c(e),d(Or.$$.fragment,e),E3=c(e),ic=l(e,"P",{});var cF=r(ic);FC=n(cF,`Since for inference there is no need for additional large memory used by the optimizer states and the gradients you should be able to fit much larger batches and/or sequence length onto the same hardware.`),cF.forEach(t),$3=c(e),uc=l(e,"P",{});var hF=r(uc);VC=n(hF,`Additionally DeepSpeed is currently developing a related product called Deepspeed-Inference which has no relationship to the ZeRO technology, but instead uses tensor parallelism to scale models that can\u2019t fit onto a single GPU. 
This is a work in progress and we will provide the integration once that product is complete.`),hF.forEach(t),k3=c(e),nt=l(e,"H3",{class:!0});var GE=r(nt);mn=l(GE,"A",{id:!0,class:!0,href:!0});var fF=r(mn);iv=l(fF,"SPAN",{});var dF=r(iv);d(Ar.$$.fragment,dF),dF.forEach(t),fF.forEach(t),YC=c(GE),uv=l(GE,"SPAN",{});var mF=r(uv);KC=n(mF,"Memory Requirements"),mF.forEach(t),GE.forEach(t),P3=c(e),cc=l(e,"P",{});var _F=r(cc);JC=n(_F,"Since Deepspeed ZeRO can offload memory to CPU (and NVMe) the framework provides utils that allow one to tell how much CPU and GPU memory will be needed depending on the number of GPUs being used."),_F.forEach(t),z3=c(e),hc=l(e,"P",{});var vF=r(hc);XC=n(vF,"Let\u2019s estimate how much memory is needed to finetune \u201Cbigscience/T0_3B\u201D on a single GPU:"),vF.forEach(t),D3=c(e),d(Tr.$$.fragment,e),O3=c(e),fc=l(e,"P",{});var jF=r(fc);QC=n(jF,"So you can fit it on a single 80GB GPU and no CPU offload, or a tiny 8GB GPU but then need ~60GB of CPU memory. (Remember this is just the memory for params, optimizer states and gradients - you will need a bit more memory for cuda kernels, activations and temps.)"),jF.forEach(t),A3=c(e),dc=l(e,"P",{});var wF=r(dc);ex=n(wF,"Then it\u2019s a tradeoff of cost vs speed. It\u2019ll be cheaper to buy/rent a smaller GPU (or less GPUs since you can use multiple GPUs with Deepspeed ZeRO. But then it\u2019ll be slower, so even if you don\u2019t care about how fast something will be done, the slowdown has a direct impact on the duration of using the GPU and thus bigger cost. So experiment and compare which works the best."),wF.forEach(t),T3=c(e),mc=l(e,"P",{});var yF=r(mc);sx=n(yF,"If you have enough GPU memory make sure to disable the CPU/NVMe offload as it\u2019ll make everything faster."),yF.forEach(t),S3=c(e),_c=l(e,"P",{});var gF=r(_c);tx=n(gF,"For example, let\u2019s repeat the same for 2 GPUs:"),gF.forEach(t),C3=c(e),d(Sr.$$.fragment,e),x3=c(e),vc=l(e,"P",{});var bF=r(vc);ax=n(bF,"So here you\u2019d want 2x 32GB GPUs or higher without offloading to CPU."),bF.forEach(t),R3=c(e),_n=l(e,"P",{});var ME=r(_n);nx=n(ME,"For full information please see "),Cr=l(ME,"A",{href:!0,rel:!0});var qF=r(Cr);ox=n(qF,"memory estimators"),qF.forEach(t),lx=n(ME,"."),ME.forEach(t),I3=c(e),ot=l(e,"H3",{class:!0});var LE=r(ot);vn=l(LE,"A",{id:!0,class:!0,href:!0});var EF=r(vn);cv=l(EF,"SPAN",{});var $F=r(cv);d(xr.$$.fragment,$F),$F.forEach(t),EF.forEach(t),rx=c(LE),hv=l(LE,"SPAN",{});var kF=r(hv);px=n(kF,"Filing Issues"),kF.forEach(t),LE.forEach(t),U3=c(e),jc=l(e,"P",{});var PF=r(jc);ix=n(PF,"Here is how to file an issue so that we could quickly get to the bottom of the issue and help you to unblock your work."),PF.forEach(t),G3=c(e),wc=l(e,"P",{});var zF=r(wc);ux=n(zF,"In your report please always include:"),zF.forEach(t),M3=c(e),N=l(e,"OL",{});var Ee=r(N);fv=l(Ee,"LI",{});var DF=r(fv);dv=l(DF,"P",{});var OF=r(dv);cx=n(OF,"the full Deepspeed config file in the report"),OF.forEach(t),DF.forEach(t),hx=c(Ee),mv=l(Ee,"LI",{});var AF=r(mv);Oe=l(AF,"P",{});var Hn=r(Oe);fx=n(Hn,"either the command line arguments if you were using the "),yc=l(Hn,"A",{href:!0});var TF=r(yc);dx=n(TF,"Trainer"),TF.forEach(t),mx=n(Hn,` or `),gc=l(Hn,"A",{href:!0});var SF=r(gc);_x=n(SF,"TrainingArguments"),SF.forEach(t),vx=n(Hn,` arguments if you were scripting the Trainer setup yourself. 
Please do not dump the `),bc=l(Hn,"A",{href:!0});var CF=r(bc);jx=n(CF,"TrainingArguments"),CF.forEach(t),wx=n(Hn," as it has dozens of entries that are irrelevant."),Hn.forEach(t),AF.forEach(t),yx=c(Ee),Rr=l(Ee,"LI",{});var ZE=r(Rr);_v=l(ZE,"P",{});var xF=r(_v);gx=n(xF,"Output of:"),xF.forEach(t),bx=c(ZE),d(Ir.$$.fragment,ZE),ZE.forEach(t),qx=c(Ee),vv=l(Ee,"LI",{});var RF=r(vv);Ur=l(RF,"P",{});var NE=r(Ur);Ex=n(NE,`If possible include a link to a Google Colab notebook that we can reproduce the problem with. You can use this `),Gr=l(NE,"A",{href:!0,rel:!0});var IF=r(Gr);$x=n(IF,"notebook"),IF.forEach(t),kx=n(NE,` as a starting point.`),NE.forEach(t),RF.forEach(t),Px=c(Ee),jv=l(Ee,"LI",{});var UF=r(jv);wv=l(UF,"P",{});var GF=r(wv);zx=n(GF,"Unless it\u2019s impossible please always use a standard dataset that we can use and not something custom."),GF.forEach(t),UF.forEach(t),Dx=c(Ee),yv=l(Ee,"LI",{});var MF=r(yv);Mr=l(MF,"P",{});var HE=r(Mr);Ox=n(HE,"If possible try to use one of the existing "),Lr=l(HE,"A",{href:!0,rel:!0});var LF=r(Lr);Ax=n(LF,"examples"),LF.forEach(t),Tx=n(HE," to reproduce the problem with."),HE.forEach(t),MF.forEach(t),Ee.forEach(t),L3=c(e),qc=l(e,"P",{});var ZF=r(qc);Sx=n(ZF,"Things to consider:"),ZF.forEach(t),Z3=c(e),jn=l(e,"UL",{});var BE=r(jn);lt=l(BE,"LI",{});var Ah=r(lt);gv=l(Ah,"P",{});var NF=r(gv);Cx=n(NF,"Deepspeed is often not the cause of the problem."),NF.forEach(t),xx=c(Ah),bv=l(Ah,"P",{});var HF=r(bv);Rx=n(HF,`Some of the filed issues proved to be Deepspeed-unrelated. That is once Deepspeed was removed from the setup, the problem was still there.`),HF.forEach(t),Ix=c(Ah),qv=l(Ah,"P",{});var BF=r(qv);Ux=n(BF,`Therefore, if it\u2019s not absolutely obvious it\u2019s a DeepSpeed-related problem, as in you can see that there is an exception and you can see that DeepSpeed modules are involved, first re-test your setup without DeepSpeed in it. And only if the problem persists then do mentioned Deepspeed and supply all the required details.`),BF.forEach(t),Ah.forEach(t),Gx=c(BE),Ev=l(BE,"LI",{});var WF=r(Ev);Zr=l(WF,"P",{});var WE=r(Zr);Mx=n(WE,`If it\u2019s clear to you that the issue is in the DeepSpeed core and not the integration part, please file the Issue directly with `),Nr=l(WE,"A",{href:!0,rel:!0});var FF=r(Nr);Lx=n(FF,"Deepspeed"),FF.forEach(t),Zx=n(WE,`. 
If you aren\u2019t sure, please do not worry, either Issue tracker will do, we will figure it out once you posted it and redirect you to another Issue tracker if need be.`),WE.forEach(t),WF.forEach(t),BE.forEach(t),N3=c(e),rt=l(e,"H3",{class:!0});var FE=r(rt);wn=l(FE,"A",{id:!0,class:!0,href:!0});var VF=r(wn);$v=l(VF,"SPAN",{});var YF=r($v);d(Hr.$$.fragment,YF),YF.forEach(t),VF.forEach(t),Nx=c(FE),kv=l(FE,"SPAN",{});var KF=r(kv);Hx=n(KF,"Troubleshooting"),KF.forEach(t),FE.forEach(t),H3=c(e),pt=l(e,"H4",{class:!0});var VE=r(pt);yn=l(VE,"A",{id:!0,class:!0,href:!0});var JF=r(yn);Pv=l(JF,"SPAN",{});var XF=r(Pv);d(Br.$$.fragment,XF),XF.forEach(t),JF.forEach(t),Bx=c(VE),Wr=l(VE,"SPAN",{});var YE=r(Wr);Wx=n(YE,"the "),zv=l(YE,"CODE",{});var QF=r(zv);Fx=n(QF,"deepspeed"),QF.forEach(t),Vx=n(YE," process gets killed at startup without a traceback"),YE.forEach(t),VE.forEach(t),B3=c(e),H=l(e,"P",{});var $e=r(H);Yx=n($e,"If the "),Dv=l($e,"CODE",{});var eV=r(Dv);Kx=n(eV,"deepspeed"),eV.forEach(t),Jx=n($e,` process gets killed at launch time without a traceback, that usually means that the program tried to allocate more CPU memory than your system has or your process is allowed to allocate and the OS kernel killed that process. This is because your configuration file most likely has either `),Ov=l($e,"CODE",{});var sV=r(Ov);Xx=n(sV,"offload_optimizer"),sV.forEach(t),Qx=n($e," or "),Av=l($e,"CODE",{});var tV=r(Av);eR=n(tV,"offload_param"),tV.forEach(t),sR=n($e,` or both configured to offload to `),Tv=l($e,"CODE",{});var aV=r(Tv);tR=n(aV,"cpu"),aV.forEach(t),aR=n($e,`. If you have NVMe, experiment with offloading to NVMe if you\u2019re running under ZeRO-3. Here is how you can `),Fr=l($e,"A",{href:!0,rel:!0});var nV=r(Fr);nR=n(nV,"estimate how much memory is needed for a specific model"),nV.forEach(t),oR=n($e,"."),$e.forEach(t),W3=c(e),it=l(e,"H4",{class:!0});var KE=r(it);gn=l(KE,"A",{id:!0,class:!0,href:!0});var oV=r(gn);Sv=l(oV,"SPAN",{});var lV=r(Sv);d(Vr.$$.fragment,lV),lV.forEach(t),oV.forEach(t),lR=c(KE),Ec=l(KE,"SPAN",{});var wU=r(Ec);rR=n(wU,"training and/or eval/predict loss is "),Cv=l(wU,"CODE",{});var rV=r(Cv);pR=n(rV,"NaN"),rV.forEach(t),wU.forEach(t),KE.forEach(t),F3=c(e),$c=l(e,"P",{});var pV=r($c);iR=n(pV,"This often happens when one takes a model pre-trained in bf16 mixed precision mode and tries to use it under fp16 (with or without mixed precision). Most models trained on TPU and often the ones released by Google are in this category (e.g. almost all t5-based models). Here the solution is to either use fp32 or bf16 if your hardware supports it (TPU, Ampere GPUs or newer)."),pV.forEach(t),V3=c(e),kc=l(e,"P",{});var iV=r(kc);uR=n(iV,"The other problem may have to do with using fp16. When you configure this section:"),iV.forEach(t),Y3=c(e),d(Yr.$$.fragment,e),K3=c(e),bn=l(e,"P",{});var JE=r(bn);cR=n(JE,"and you see in your log that Deepspeed reports "),xv=l(JE,"CODE",{});var uV=r(xv);hR=n(uV,"OVERFLOW!"),uV.forEach(t),fR=n(JE," as follows:"),JE.forEach(t),J3=c(e),d(Kr.$$.fragment,e),X3=c(e),Pc=l(e,"P",{});var cV=r(Pc);dR=n(cV,"that means that the Deepspeed loss scaler can\u2019t figure out a scaling co-efficient that overcomes loss overflow."),cV.forEach(t),Q3=c(e),zc=l(e,"P",{});var hV=r(zc);mR=n(hV,"(the log was massaged to be more readable here.)"),hV.forEach(t),e0=c(e),ds=l(e,"P",{});var Th=r(ds);_R=n(Th,"In this case you usually need to raise the value of "),Rv=l(Th,"CODE",{});var fV=r(Rv);vR=n(fV,"initial_scale_power"),fV.forEach(t),jR=n(Th,". 
Setting it to "),Iv=l(Th,"CODE",{});var dV=r(Iv);wR=n(dV,'"initial_scale_power": 32'),dV.forEach(t),yR=n(Th," will typically resolve the problem."),Th.forEach(t),s0=c(e),ut=l(e,"H3",{class:!0});var XE=r(ut);qn=l(XE,"A",{id:!0,class:!0,href:!0});var mV=r(qn);Uv=l(mV,"SPAN",{});var _V=r(Uv);d(Jr.$$.fragment,_V),_V.forEach(t),mV.forEach(t),gR=c(XE),Gv=l(XE,"SPAN",{});var vV=r(Gv);bR=n(vV,"Notes"),vV.forEach(t),XE.forEach(t),t0=c(e),ms=l(e,"UL",{});var Sh=r(ms);ct=l(Sh,"LI",{});var Ch=r(ct);qR=n(Ch,"DeepSpeed works with the PyTorch "),Dc=l(Ch,"A",{href:!0});var jV=r(Dc);ER=n(jV,"Trainer"),jV.forEach(t),$R=n(Ch," but not TF "),Mv=l(Ch,"CODE",{});var wV=r(Mv);kR=n(wV,"TFTrainer"),wV.forEach(t),PR=n(Ch,"."),Ch.forEach(t),zR=c(Sh),Xr=l(Sh,"LI",{});var QE=r(Xr);DR=n(QE,"While DeepSpeed has a pip installable PyPI package, it is highly recommended that it gets installed from "),Qr=l(QE,"A",{href:!0,rel:!0});var yV=r(Qr);OR=n(yV,"source"),yV.forEach(t),AR=n(QE,` to best match your hardware and also if you need to enable certain features, like 1-bit Adam, which aren\u2019t available in the pypi distribution.`),QE.forEach(t),TR=c(Sh),ht=l(Sh,"LI",{});var xh=r(ht);SR=n(xh,"You don\u2019t have to use the "),Oc=l(xh,"A",{href:!0});var gV=r(Oc);CR=n(gV,"Trainer"),gV.forEach(t),xR=n(xh,` to use DeepSpeed with \u{1F917} Transformers - you can use any model with your own trainer, and you will have to adapt the latter according to `),ep=l(xh,"A",{href:!0,rel:!0});var bV=r(ep);RR=n(bV,"the DeepSpeed integration instructions"),bV.forEach(t),IR=n(xh,"."),xh.forEach(t),Sh.forEach(t),a0=c(e),ft=l(e,"H2",{class:!0});var e4=r(ft);En=l(e4,"A",{id:!0,class:!0,href:!0});var qV=r(En);Lv=l(qV,"SPAN",{});var EV=r(Lv);d(sp.$$.fragment,EV),EV.forEach(t),qV.forEach(t),UR=c(e4),Zv=l(e4,"SPAN",{});var $V=r(Zv);GR=n($V,"Non-Trainer Deepspeed Integration"),$V.forEach(t),e4.forEach(t),n0=c(e),fe=l(e,"P",{});var Bn=r(fe);MR=n(Bn,"The "),Ac=l(Bn,"A",{href:!0});var kV=r(Ac);LR=n(kV,"HfDeepSpeedConfig"),kV.forEach(t),ZR=n(Bn,` is used to integrate Deepspeed into the \u{1F917} Transformers core functionality, when `),Tc=l(Bn,"A",{href:!0});var PV=r(Tc);NR=n(PV,"Trainer"),PV.forEach(t),HR=n(Bn," is not used. The only thing that it does is handling Deepspeed ZeRO-3 param gathering and automatically splitting the model onto multiple gpus during "),Nv=l(Bn,"CODE",{});var zV=r(Nv);BR=n(zV,"from_pretrained"),zV.forEach(t),WR=n(Bn," call. 
Everything else you have to do by yourself."),Bn.forEach(t),o0=c(e),$n=l(e,"P",{});var s4=r($n);FR=n(s4,"When using "),Sc=l(s4,"A",{href:!0});var DV=r(Sc);VR=n(DV,"Trainer"),DV.forEach(t),YR=n(s4," everything is automatically taken care of."),s4.forEach(t),l0=c(e),_s=l(e,"P",{});var Rh=r(_s);KR=n(Rh,"When not using "),Cc=l(Rh,"A",{href:!0});var OV=r(Cc);JR=n(OV,"Trainer"),OV.forEach(t),XR=n(Rh,`, to efficiently deploy DeepSpeed ZeRO-3, you must instantiate the `),xc=l(Rh,"A",{href:!0});var AV=r(xc);QR=n(AV,"HfDeepSpeedConfig"),AV.forEach(t),eI=n(Rh," object before instantiating the model and keep that object alive."),Rh.forEach(t),r0=c(e),kn=l(e,"P",{});var t4=r(kn);sI=n(t4,"If you\u2019re using Deepspeed ZeRO-1 or ZeRO-2 you don\u2019t need to use "),Hv=l(t4,"CODE",{});var TV=r(Hv);tI=n(TV,"HfDeepSpeedConfig"),TV.forEach(t),aI=n(t4," at all."),t4.forEach(t),p0=c(e),Rc=l(e,"P",{});var SV=r(Rc);nI=n(SV,"For example for a pretrained model:"),SV.forEach(t),i0=c(e),d(tp.$$.fragment,e),u0=c(e),Ic=l(e,"P",{});var CV=r(Ic);oI=n(CV,"or for non-pretrained model:"),CV.forEach(t),c0=c(e),d(ap.$$.fragment,e),h0=c(e),de=l(e,"P",{});var Wn=r(de);lI=n(Wn,"Please note that if you\u2019re not using the "),Uc=l(Wn,"A",{href:!0});var xV=r(Uc);rI=n(xV,"Trainer"),xV.forEach(t),pI=n(Wn," integration, you\u2019re completely on your own. Basically follow the documentation on the "),np=l(Wn,"A",{href:!0,rel:!0});var RV=r(np);iI=n(RV,"Deepspeed"),RV.forEach(t),uI=n(Wn," website. Also you have to configure explicitly the config file - you can\u2019t use "),Bv=l(Wn,"CODE",{});var IV=r(Bv);cI=n(IV,'"auto"'),IV.forEach(t),hI=n(Wn," values and you will have to put real values instead."),Wn.forEach(t),f0=c(e),dt=l(e,"H2",{class:!0});var a4=r(dt);Pn=l(a4,"A",{id:!0,class:!0,href:!0});var UV=r(Pn);Wv=l(UV,"SPAN",{});var GV=r(Wv);d(op.$$.fragment,GV),GV.forEach(t),UV.forEach(t),fI=c(a4),Fv=l(a4,"SPAN",{});var MV=r(Fv);dI=n(MV,"HfDeepSpeedConfig"),MV.forEach(t),a4.forEach(t),d0=c(e),ee=l(e,"DIV",{class:!0});var Fn=r(ee);d(lp.$$.fragment,Fn),mI=c(Fn),Vv=l(Fn,"P",{});var LV=r(Vv);_I=n(LV,"This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage."),LV.forEach(t),vI=c(Fn),Ae=l(Fn,"P",{});var Vn=r(Ae);jI=n(Vn,"A "),Yv=l(Vn,"CODE",{});var ZV=r(Yv);wI=n(ZV,"weakref"),ZV.forEach(t),yI=n(Vn,` of this object is stored in the module\u2019s globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `),Kv=l(Vn,"CODE",{});var NV=r(Kv);gI=n(NV,"from_pretrained"),NV.forEach(t),bI=n(Vn," and "),Jv=l(Vn,"CODE",{});var HV=r(Jv);qI=n(HV,"_get_resized_embeddings"),HV.forEach(t),EI=n(Vn,`). Therefore it\u2019s important that this object remains alive while the program is still running.`),Vn.forEach(t),$I=c(Fn),me=l(Fn,"P",{});var vt=r(me);Gc=l(vt,"A",{href:!0});var BV=r(Gc);kI=n(BV,"Trainer"),BV.forEach(t),PI=n(vt," uses the "),Xv=l(vt,"CODE",{});var WV=r(Xv);zI=n(WV,"HfTrainerDeepSpeedConfig"),WV.forEach(t),DI=n(vt,` subclass instead. That subclass has logic to sync the configuration with values of `),Mc=l(vt,"A",{href:!0});var FV=r(Mc);OI=n(FV,"TrainingArguments"),FV.forEach(t),AI=n(vt," by replacing special placeholder values: "),Qv=l(vt,"CODE",{});var VV=r(Qv);TI=n(VV,'"auto"'),VV.forEach(t),SI=n(vt,`. 
Without this special logic the DeepSpeed configuration is not modified in any way.`),vt.forEach(t),Fn.forEach(t),m0=c(e),mt=l(e,"H3",{class:!0});var n4=r(mt);zn=l(n4,"A",{id:!0,class:!0,href:!0});var YV=r(zn);ej=l(YV,"SPAN",{});var KV=r(ej);d(rp.$$.fragment,KV),KV.forEach(t),YV.forEach(t),CI=c(n4),sj=l(n4,"SPAN",{});var JV=r(sj);xI=n(JV,"Custom DeepSpeed ZeRO Inference"),JV.forEach(t),n4.forEach(t),_0=c(e),Dn=l(e,"P",{});var o4=r(Dn);RI=n(o4,"Here is an example of how one could do DeepSpeed ZeRO Inference without using "),Lc=l(o4,"A",{href:!0});var XV=r(Lc);II=n(XV,"Trainer"),XV.forEach(t),UI=n(o4," when one can\u2019t fit a model onto a single GPU. The solution includes using additional GPUs or/and offloading GPU memory to CPU memory."),o4.forEach(t),v0=c(e),Zc=l(e,"P",{});var QV=r(Zc);GI=n(QV,"The important nuance to understand here is that the way ZeRO is designed you can process different inputs on different GPUs in parallel."),QV.forEach(t),j0=c(e),Nc=l(e,"P",{});var eY=r(Nc);MI=n(eY,"The example has copious notes and is self-documenting."),eY.forEach(t),w0=c(e),Hc=l(e,"P",{});var sY=r(Hc);LI=n(sY,"Make sure to:"),sY.forEach(t),y0=c(e),On=l(e,"OL",{});var l4=r(On);tj=l(l4,"LI",{});var tY=r(tj);ZI=n(tY,"disable CPU offload if you have enough GPU memory (since it slows things down)"),tY.forEach(t),NI=c(l4),aj=l(l4,"LI",{});var aY=r(aj);HI=n(aY,"enable bf16 if you own an Ampere or a newer GPU to make things faster. If you don\u2019t have that hardware you may enable fp16 as long as you don\u2019t use any model that was pre-trained in bf16 mixed precision (such as most t5 models). These usually overflow in fp16 and you will see garbage as output."),aY.forEach(t),l4.forEach(t),g0=c(e),d(pp.$$.fragment,e),b0=c(e),An=l(e,"P",{});var r4=r(An);BI=n(r4,"Let\u2019s save it as "),nj=l(r4,"CODE",{});var nY=r(nj);WI=n(nY,"t0.py"),nY.forEach(t),FI=n(r4," and run it:"),r4.forEach(t),q0=c(e),d(ip.$$.fragment,e),E0=c(e),Bc=l(e,"P",{});var oY=r(Bc);VI=n(oY,"This was a very basic example and you will want to adapt it to your needs."),oY.forEach(t),$0=c(e),_t=l(e,"H2",{class:!0});var p4=r(_t);Tn=l(p4,"A",{id:!0,class:!0,href:!0});var lY=r(Tn);oj=l(lY,"SPAN",{});var rY=r(oj);d(up.$$.fragment,rY),rY.forEach(t),lY.forEach(t),YI=c(p4),lj=l(p4,"SPAN",{});var pY=r(lj);KI=n(pY,"Main DeepSpeed Resources"),pY.forEach(t),p4.forEach(t),k0=c(e),_e=l(e,"UL",{});var Yn=r(_e);rj=l(Yn,"LI",{});var iY=r(rj);cp=l(iY,"A",{href:!0,rel:!0});var uY=r(cp);JI=n(uY,"Project\u2019s github"),uY.forEach(t),iY.forEach(t),XI=c(Yn),pj=l(Yn,"LI",{});var cY=r(pj);hp=l(cY,"A",{href:!0,rel:!0});var hY=r(hp);QI=n(hY,"Usage docs"),hY.forEach(t),cY.forEach(t),eU=c(Yn),ij=l(Yn,"LI",{});var fY=r(ij);fp=l(fY,"A",{href:!0,rel:!0});var dY=r(fp);sU=n(dY,"API docs"),dY.forEach(t),fY.forEach(t),tU=c(Yn),uj=l(Yn,"LI",{});var mY=r(uj);dp=l(mY,"A",{href:!0,rel:!0});var _Y=r(dp);aU=n(_Y,"Blog posts"),_Y.forEach(t),mY.forEach(t),Yn.forEach(t),P0=c(e),Wc=l(e,"P",{});var vY=r(Wc);nU=n(vY,"Papers:"),vY.forEach(t),z0=c(e),vs=l(e,"UL",{});var Ih=r(vs);cj=l(Ih,"LI",{});var jY=r(cj);mp=l(jY,"A",{href:!0,rel:!0});var wY=r(mp);oU=n(wY,"ZeRO: Memory Optimizations Toward Training Trillion Parameter Models"),wY.forEach(t),jY.forEach(t),lU=c(Ih),hj=l(Ih,"LI",{});var yY=r(hj);_p=l(yY,"A",{href:!0,rel:!0});var gY=r(_p);rU=n(gY,"ZeRO-Offload: Democratizing Billion-Scale Model Training"),gY.forEach(t),yY.forEach(t),pU=c(Ih),fj=l(Ih,"LI",{});var bY=r(fj);vp=l(bY,"A",{href:!0,rel:!0});var qY=r(vp);iU=n(qY,"ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep 
Learning"),qY.forEach(t),bY.forEach(t),Ih.forEach(t),D0=c(e),js=l(e,"P",{});var Uh=r(js);uU=n(Uh,"Finally, please, remember that, HuggingFace "),Fc=l(Uh,"A",{href:!0});var EY=r(Fc);cU=n(EY,"Trainer"),EY.forEach(t),hU=n(Uh,` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with `),jp=l(Uh,"A",{href:!0,rel:!0});var $Y=r(jp);fU=n($Y,"DeepSpeed GitHub"),$Y.forEach(t),dU=n(Uh,"."),Uh.forEach(t),this.h()},h(){h(g,"name","hf:doc:metadata"),h(g,"content",JSON.stringify(RY)),h(k,"id","deepspeed-integration"),h(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(k,"href","#deepspeed-integration"),h(b,"class","relative group"),h(q,"href","https://github.com/microsoft/DeepSpeed"),h(q,"rel","nofollow"),h(W,"href","https://arxiv.org/abs/1910.02054"),h(W,"rel","nofollow"),h(Kn,"href","https://arxiv.org/abs/2101.06840"),h(Kn,"rel","nofollow"),h(Jn,"href","https://arxiv.org/abs/2104.07857"),h(Jn,"rel","nofollow"),h(Xn,"href","https://github.com/microsoft/DeepSpeed"),h(Xn,"rel","nofollow"),h(zp,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Dp,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Op,"href","#nontrainer-deepspeed-integration"),h(Rp,"href","#zero-inference"),h(Up,"id","deepspeed-trainer-integration"),h(gt,"id","trainer-deepspeed-integration"),h(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(gt,"href","#trainer-deepspeed-integration"),h(qs,"class","relative group"),h(Gp,"id","deepspeed-installation"),h(bt,"id","installation"),h(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(bt,"href","#installation"),h(Es,"class","relative group"),h(oo,"href","https://github.com/microsoft/deepspeed#installation"),h(oo,"rel","nofollow"),h(lo,"href","https://www.deepspeed.ai/tutorials/advanced-install/"),h(lo,"rel","nofollow"),h(Lp,"href","trainer#cuda-extension-installation-notes"),h(uo,"href","https://developer.nvidia.com/cuda-gpus"),h(uo,"rel","nofollow"),h(mo,"href","https://github.com/microsoft/DeepSpeed/issues"),h(mo,"rel","nofollow"),h(Vp,"id","deepspeed-multi-gpu"),h(Dt,"id","deployment-with-multiple-gpus"),h(Dt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Dt,"href","#deployment-with-multiple-gpus"),h($s,"class","relative group"),h(Yp,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(vo,"href","https://www.deepspeed.ai/docs/config-json/"),h(vo,"rel","nofollow"),h(yo,"href","https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node"),h(yo,"rel","nofollow"),h(bo,"href","https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400"),h(bo,"rel","nofollow"),h(Xp,"id","deepspeed-one-gpu"),h(xt,"id","deployment-with-one-gpu"),h(xt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(xt,"href","#deployment-with-one-gpu"),h(Ps,"class","relative 
group"),h(Qp,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h($o,"href","https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node"),h($o,"rel","nofollow"),h(Po,"href","https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685"),h(Po,"rel","nofollow"),h(li,"id","deepspeed-notebook"),h(Gt,"id","deployment-in-notebooks"),h(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Gt,"href","#deployment-in-notebooks"),h(Ds,"class","relative group"),h(ci,"id","deepspeed-config"),h(Ht,"id","configuration"),h(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ht,"href","#configuration"),h(Os,"class","relative group"),h(Ro,"href","https://www.deepspeed.ai/docs/config-json/"),h(Ro,"rel","nofollow"),h(Io,"href","https://github.com/microsoft/DeepSpeedExamples"),h(Io,"rel","nofollow"),h(Mo,"href","https://github.com/microsoft/DeepSpeed"),h(Mo,"rel","nofollow"),h(fi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(di,"id","deepspeed-config-passing"),h(Kt,"id","passing-configuration"),h(Kt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Kt,"href","#passing-configuration"),h(As,"class","relative group"),h(mi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(_i,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(vi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(yi,"id","deepspeed-config-shared"),h(Jt,"id","shared-configuration"),h(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Jt,"href","#shared-configuration"),h(Ts,"class","relative group"),h(gi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(bi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(qi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Ei,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(ki,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(Pi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(zi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Di,"id","deepspeed-zero"),h(ta,"id","zero"),h(ta,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ta,"href","#zero"),h(Ss,"class","relative group"),h(Yo,"href","https://www.deepspeed.ai/tutorials/zero/"),h(Yo,"rel","nofollow"),h(Ko,"href","https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training"),h(Ko,"rel","nofollow"),h(Oi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Ti,"id","deepspeed-zero2-config"),h(na,"id","zero2-config"),h(na,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(na,"href","#zero2-config"),h(Cs,"class","relative group"),h(Ri,"id","deepspeed-zero3-config"),h(oa,"id","zero3-config"),h(oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(oa,"href","#zero3-config"),h(Rs,"class","relative group"),h(Mi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Zi,"id","deepspeed-nvme"),h(pa,"id","nvme-support"),h(pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(pa,"href","#nvme-support"),h(Is,"class","relative group"),h(fl,"href","https://www.deepspeed.ai/docs/config-json/#optimizer-offloading"),h(fl,"rel","nofollow"),h(dl,"href","https://www.deepspeed.ai/docs/config-json/#parameter-offloading"),h(dl,"rel","nofollow"),h(ml,"href","https://github.com/microsoft/DeepSpeed/issues/998"),h(ml,"rel","nofollow"),h(Bi,"id","deepspeed-zero2-zero3-performance"),h(ca,"id","zero2-vs-zero3-performance"),h(ca,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ca,"href","#zero2-vs-zero3-performance"),h(Us,"class","relative group"),h(Vi,"id","deepspeed-zero2-example"),h(fa,"id","zero2-example"),h(fa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(fa,"href","#zero2-example"),h(Ms,"class","relative group"),h(Yi,"id","deepspeed-zero3-example"),h(_a,"id","zero3-example"),h(_a,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(_a,"href","#zero3-example"),h(Ls,"class","relative group"),h(wa,"id","optimizer-and-scheduler"),h(wa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(wa,"href","#optimizer-and-scheduler"),h(Zs,"class","relative group"),h(Ji,"id","deepspeed-optimizer"),h(ba,"id","optimizer"),h(ba,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(ba,"href","#optimizer"),h(Ns,"class","relative group"),h(kl,"href","https://www.deepspeed.ai/docs/config-json/#optimizer-parameters"),h(kl,"rel","nofollow"),h(Xi,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(tu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(nu,"id","deepspeed-scheduler"),h(Ea,"id","scheduler"),h(Ea,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ea,"href","#scheduler"),h(Hs,"class","relative 
group"),h(xl,"href","https://www.deepspeed.ai/docs/config-json/#scheduler-parameters"),h(xl,"rel","nofollow"),h(lu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(ru,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(iu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(uu,"id","deepspeed-fp32"),h(Oa,"id","fp32-precision"),h(Oa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Oa,"href","#fp32-precision"),h(Bs,"class","relative group"),h(Zl,"href","https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"),h(Zl,"rel","nofollow"),h(hu,"id","deepspeed-amp"),h(Sa,"id","automatic-mixed-precision"),h(Sa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Sa,"href","#automatic-mixed-precision"),h(Ws,"class","relative group"),h(Ca,"id","fp16"),h(Ca,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ca,"href","#fp16"),h(Fs,"class","relative group"),h(mu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(vu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Fl,"href","https://www.deepspeed.ai/docs/config-json/#fp16-training-options"),h(Fl,"rel","nofollow"),h(Ia,"id","bf16"),h(Ia,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ia,"href","#bf16"),h(Vs,"class","relative group"),h(Ga,"id","apex"),h(Ga,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ga,"href","#apex"),h(Ys,"class","relative group"),h(bu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Eu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(er,"href","https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options"),h(er,"rel","nofollow"),h($u,"id","deepspeed-bs"),h(Na,"id","batch-size"),h(Na,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Na,"href","#batch-size"),h(Ks,"class","relative group"),h(Pu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Du,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Ou,"id","deepspeed-grad-acc"),h(Ba,"id","gradient-accumulation"),h(Ba,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ba,"href","#gradient-accumulation"),h(Js,"class","relative group"),h(Tu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Cu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(xu,"id","deepspeed-grad-clip"),h(Fa,"id","gradient-clipping"),h(Fa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Fa,"href","#gradient-clipping"),h(Xs,"class","relative group"),h(Iu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Gu,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Mu,"id","deepspeed-weight-extraction"),h(Ya,"id","getting-the-model-weights-out"),h(Ya,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Ya,"href","#getting-the-model-weights-out"),h(Qs,"class","relative group"),h(hr,"href","https://huggingface.co/models"),h(hr,"rel","nofollow"),h(nn,"id","zero3-and-infinity-nuances"),h(nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(nn,"href","#zero3-and-infinity-nuances"),h(et,"class","relative group"),h(on,"id","constructing-massive-models"),h(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(on,"href","#constructing-massive-models"),h(st,"class","relative group"),h(ec,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(sc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(qr,"href","https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models"),h(qr,"rel","nofollow"),h(ac,"href","#from_pretrained-torch-dtype"),h(un,"id","gathering-parameters"),h(un,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(un,"href","#gathering-parameters"),h(tt,"class","relative group"),h(kr,"href","https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination"),h(kr,"rel","nofollow"),h(oc,"id","deepspeed-zero-inference"),h(hn,"id","zero-inference"),h(hn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(hn,"href","#zero-inference"),h(at,"class","relative group"),h(rc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(mn,"id","memory-requirements"),h(mn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(mn,"href","#memory-requirements"),h(nt,"class","relative group"),h(Cr,"href","https://deepspeed.readthedocs.io/en/latest/memory.html"),h(Cr,"rel","nofollow"),h(vn,"id","filing-issues"),h(vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(vn,"href","#filing-issues"),h(ot,"class","relative 
group"),h(yc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(gc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(bc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(Gr,"href","https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb"),h(Gr,"rel","nofollow"),h(Lr,"href","https://github.com/huggingface/transformers/tree/main/examples/pytorch"),h(Lr,"rel","nofollow"),h(Nr,"href","https://github.com/microsoft/DeepSpeed/"),h(Nr,"rel","nofollow"),h(wn,"id","troubleshooting"),h(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(wn,"href","#troubleshooting"),h(rt,"class","relative group"),h(yn,"id","the-deepspeed-process-gets-killed-at-startup-without-a-traceback"),h(yn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(yn,"href","#the-deepspeed-process-gets-killed-at-startup-without-a-traceback"),h(pt,"class","relative group"),h(Fr,"href","https://deepspeed.readthedocs.io/en/latest/memory.html"),h(Fr,"rel","nofollow"),h(gn,"id","training-andor-evalpredict-loss-is-nan"),h(gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(gn,"href","#training-andor-evalpredict-loss-is-nan"),h(it,"class","relative group"),h(qn,"id","notes"),h(qn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(qn,"href","#notes"),h(ut,"class","relative group"),h(Dc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Qr,"href","https://github.com/microsoft/deepspeed#installation"),h(Qr,"rel","nofollow"),h(Oc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(ep,"href","https://www.deepspeed.ai/getting-started/#writing-deepspeed-models"),h(ep,"rel","nofollow"),h(En,"id","nontrainer-deepspeed-integration"),h(En,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(En,"href","#nontrainer-deepspeed-integration"),h(ft,"class","relative group"),h(Ac,"href","/docs/transformers/pr_19429/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig"),h(Tc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Sc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Cc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(xc,"href","/docs/transformers/pr_19429/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig"),h(Uc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(np,"href","https://www.deepspeed.ai/"),h(np,"rel","nofollow"),h(Pn,"id","transformers.deepspeed.HfDeepSpeedConfig"),h(Pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Pn,"href","#transformers.deepspeed.HfDeepSpeedConfig"),h(dt,"class","relative 
group"),h(Gc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Mc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),h(zn,"id","custom-deepspeed-zero-inference"),h(zn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(zn,"href","#custom-deepspeed-zero-inference"),h(mt,"class","relative group"),h(Lc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(Tn,"id","main-deepspeed-resources"),h(Tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(Tn,"href","#main-deepspeed-resources"),h(_t,"class","relative group"),h(cp,"href","https://github.com/microsoft/deepspeed"),h(cp,"rel","nofollow"),h(hp,"href","https://www.deepspeed.ai/getting-started/"),h(hp,"rel","nofollow"),h(fp,"href","https://deepspeed.readthedocs.io/en/latest/index.html"),h(fp,"rel","nofollow"),h(dp,"href","https://www.microsoft.com/en-us/research/search/?q=deepspeed"),h(dp,"rel","nofollow"),h(mp,"href","https://arxiv.org/abs/1910.02054"),h(mp,"rel","nofollow"),h(_p,"href","https://arxiv.org/abs/2101.06840"),h(_p,"rel","nofollow"),h(vp,"href","https://arxiv.org/abs/2104.07857"),h(vp,"rel","nofollow"),h(Fc,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(jp,"href","https://github.com/microsoft/DeepSpeed/issues"),h(jp,"rel","nofollow")},m(e,p){s(document.head,g),i(e,S,p),i(e,b,p),s(b,k),s(k,X),m(z,X,null),s(b,C),s(b,Q),s(Q,x),i(e,te,p),i(e,T,p),s(T,q),s(q,E),s(T,gs),s(T,W),s(W,bs),s(T,i4),i(e,wj,p),i(e,R,p),s(R,Gh),s(Gh,u4),s(R,c4),s(R,Mh),s(Mh,h4),s(R,f4),s(R,Lh),s(Lh,d4),s(R,m4),s(R,Zh),s(Zh,_4),s(R,v4),s(R,Nh),s(Nh,j4),s(R,w4),s(R,Hh),s(Hh,y4),i(e,yj,p),i(e,Te,p),s(Te,g4),s(Te,Kn),s(Kn,b4),s(Te,q4),s(Te,Jn),s(Jn,E4),s(Te,$4),i(e,gj,p),i(e,kp,p),s(kp,k4),i(e,bj,p),i(e,Pp,p),s(Pp,P4),i(e,qj,p),i(e,wt,p),s(wt,z4),s(wt,Xn),s(Xn,D4),s(wt,O4),i(e,Ej,p),i(e,yt,p),s(yt,Qn),s(Qn,A4),s(Qn,zp),s(zp,T4),s(Qn,S4),s(yt,C4),s(yt,F),s(F,x4),s(F,Dp),s(Dp,R4),s(F,I4),s(F,Bh),s(Bh,U4),s(F,G4),s(F,Wh),s(Wh,M4),s(F,L4),s(F,Fh),s(Fh,Z4),s(F,N4),s(F,Op),s(Op,H4),s(F,B4),i(e,$j,p),i(e,Ap,p),s(Ap,W4),i(e,kj,p),i(e,Tp,p),s(Tp,F4),i(e,Pj,p),i(e,Sp,p),s(Sp,Vh),s(Vh,V4),i(e,zj,p),i(e,Cp,p),s(Cp,Y4),i(e,Dj,p),i(e,xp,p),s(xp,eo),s(eo,K4),s(eo,Rp),s(Rp,J4),s(eo,X4),i(e,Oj,p),i(e,Ip,p),s(Ip,Q4),i(e,Aj,p),i(e,Up,p),i(e,Tj,p),i(e,qs,p),s(qs,gt),s(gt,Yh),m(so,Yh,null),s(qs,e6),s(qs,Kh),s(Kh,s6),i(e,Sj,p),i(e,Gp,p),i(e,Cj,p),i(e,Es,p),s(Es,bt),s(bt,Jh),m(to,Jh,null),s(Es,t6),s(Es,Xh),s(Xh,a6),i(e,xj,p),i(e,Mp,p),s(Mp,n6),i(e,Rj,p),m(ao,e,p),i(e,Ij,p),i(e,Se,p),s(Se,o6),s(Se,Qh),s(Qh,l6),s(Se,r6),s(Se,ef),s(ef,p6),s(Se,i6),i(e,Uj,p),m(no,e,p),i(e,Gj,p),i(e,Ce,p),s(Ce,u6),s(Ce,oo),s(oo,c6),s(Ce,h6),s(Ce,lo),s(lo,f6),s(Ce,d6),i(e,Mj,p),i(e,qt,p),s(qt,m6),s(qt,Lp),s(Lp,_6),s(qt,v6),i(e,Lj,p),i(e,Zp,p),s(Zp,j6),i(e,Zj,p),i(e,Np,p),s(Np,w6),i(e,Nj,p),m(ro,e,p),i(e,Hj,p),i(e,xe,p),s(xe,y6),s(xe,sf),s(sf,g6),s(xe,b6),s(xe,tf),s(tf,q6),s(xe,E6),i(e,Bj,p),i(e,Et,p),s(Et,$6),s(Et,af),s(af,k6),s(Et,P6),i(e,Wj,p),m(po,e,p),i(e,Fj,p),i(e,ke,p),s(ke,z6),s(ke,nf),s(nf,D6),s(ke,O6),s(ke,of),s(of,A6),s(ke,T6),s(ke,lf),s(lf,S6),i(e,Vj,p),i(e,Hp,p),s(Hp,C6),i(e,Yj,p),m(io,e,p),i(e,Kj,p),i(e,Re,p),s(
Re,x6),s(Re,rf),s(rf,R6),s(Re,I6),s(Re,pf),s(pf,U6),s(Re,G6),i(e,Jj,p),i(e,$t,p),s($t,M6),s($t,uf),s(uf,L6),s($t,Z6),i(e,Xj,p),i(e,Ie,p),s(Ie,N6),s(Ie,cf),s(cf,H6),s(Ie,B6),s(Ie,uo),s(uo,W6),s(Ie,F6),i(e,Qj,p),i(e,Bp,p),s(Bp,V6),i(e,ew,p),m(co,e,p),i(e,sw,p),i(e,Wp,p),s(Wp,Y6),i(e,tw,p),m(ho,e,p),i(e,aw,p),i(e,Fp,p),s(Fp,K6),i(e,nw,p),m(fo,e,p),i(e,ow,p),i(e,kt,p),s(kt,J6),s(kt,hf),s(hf,X6),s(kt,Q6),i(e,lw,p),i(e,Pt,p),s(Pt,e$),s(Pt,ff),s(ff,s$),s(Pt,t$),i(e,rw,p),i(e,zt,p),s(zt,a$),s(zt,mo),s(mo,n$),s(zt,o$),i(e,pw,p),i(e,Vp,p),i(e,iw,p),i(e,$s,p),s($s,Dt),s(Dt,df),m(_o,df,null),s($s,l$),s($s,mf),s(mf,r$),i(e,uw,p),i(e,Ot,p),s(Ot,p$),s(Ot,Yp),s(Yp,i$),s(Ot,u$),i(e,cw,p),i(e,At,p),s(At,ks),s(ks,c$),s(ks,_f),s(_f,h$),s(ks,f$),s(ks,vf),s(vf,d$),s(ks,m$),s(At,_$),s(At,Pe),s(Pe,v$),s(Pe,jf),s(jf,j$),s(Pe,w$),s(Pe,wf),s(wf,y$),s(Pe,g$),s(Pe,vo),s(vo,b$),s(Pe,q$),i(e,hw,p),i(e,Kp,p),s(Kp,E$),i(e,fw,p),m(jo,e,p),i(e,dw,p),i(e,Jp,p),s(Jp,$$),i(e,mw,p),m(wo,e,p),i(e,_w,p),i(e,I,p),s(I,k$),s(I,yf),s(yf,P$),s(I,z$),s(I,gf),s(gf,D$),s(I,O$),s(I,bf),s(bf,A$),s(I,T$),s(I,qf),s(qf,S$),s(I,C$),s(I,yo),s(yo,x$),s(I,R$),i(e,vw,p),i(e,ae,p),s(ae,I$),s(ae,Ef),s(Ef,U$),s(ae,G$),s(ae,$f),s($f,M$),s(ae,L$),s(ae,kf),s(kf,Z$),s(ae,N$),i(e,jw,p),i(e,Tt,p),s(Tt,H$),s(Tt,Pf),s(Pf,B$),s(Tt,W$),i(e,ww,p),m(go,e,p),i(e,yw,p),i(e,St,p),s(St,F$),s(St,zf),s(zf,V$),s(St,Y$),i(e,gw,p),i(e,Ct,p),s(Ct,K$),s(Ct,bo),s(bo,J$),s(Ct,X$),i(e,bw,p),i(e,Xp,p),i(e,qw,p),i(e,Ps,p),s(Ps,xt),s(xt,Df),m(qo,Df,null),s(Ps,Q$),s(Ps,Of),s(Of,e5),i(e,Ew,p),i(e,Rt,p),s(Rt,s5),s(Rt,Qp),s(Qp,t5),s(Rt,a5),i(e,$w,p),m(Eo,e,p),i(e,kw,p),i(e,Ue,p),s(Ue,n5),s(Ue,Af),s(Af,o5),s(Ue,l5),s(Ue,$o),s($o,r5),s(Ue,p5),i(e,Pw,p),i(e,ei,p),s(ei,i5),i(e,zw,p),i(e,It,p),s(It,Tf),s(Tf,u5),s(It,c5),s(It,Sf),s(Sf,h5),i(e,Dw,p),i(e,si,p),s(si,f5),i(e,Ow,p),m(ko,e,p),i(e,Aw,p),i(e,ti,p),s(ti,d5),i(e,Tw,p),i(e,Ut,p),s(Ut,m5),s(Ut,Po),s(Po,_5),s(Ut,v5),i(e,Sw,p),i(e,ai,p),s(ai,j5),i(e,Cw,p),i(e,ni,p),s(ni,w5),i(e,xw,p),i(e,oi,p),s(oi,zs),s(zs,zo),s(zo,y5),s(zo,Cf),s(Cf,g5),s(zo,b5),s(zs,q5),m(Do,zs,null),s(zs,E5),s(zs,xf),s(xf,$5),i(e,Rw,p),i(e,li,p),i(e,Iw,p),i(e,Ds,p),s(Ds,Gt),s(Gt,Rf),m(Oo,Rf,null),s(Ds,k5),s(Ds,If),s(If,P5),i(e,Uw,p),i(e,Mt,p),s(Mt,z5),s(Mt,Uf),s(Uf,D5),s(Mt,O5),i(e,Gw,p),i(e,ri,p),s(ri,A5),i(e,Mw,p),m(Ao,e,p),i(e,Lw,p),i(e,Lt,p),s(Lt,T5),s(Lt,Gf),s(Gf,S5),s(Lt,C5),i(e,Zw,p),i(e,pi,p),s(pi,x5),i(e,Nw,p),i(e,ii,p),s(ii,R5),i(e,Hw,p),m(To,e,p),i(e,Bw,p),i(e,Ge,p),s(Ge,I5),s(Ge,Mf),s(Mf,U5),s(Ge,G5),s(Ge,Lf),s(Lf,M5),s(Ge,L5),i(e,Ww,p),m(So,e,p),i(e,Fw,p),i(e,Zt,p),s(Zt,Z5),s(Zt,Zf),s(Zf,N5),s(Zt,H5),i(e,Vw,p),m(Co,e,p),i(e,Yw,p),i(e,ui,p),s(ui,B5),i(e,Kw,p),i(e,Nt,p),s(Nt,W5),s(Nt,Nf),s(Nf,F5),s(Nt,V5),i(e,Jw,p),i(e,ci,p),i(e,Xw,p),i(e,Os,p),s(Os,Ht),s(Ht,Hf),m(xo,Hf,null),s(Os,Y5),s(Os,Bf),s(Bf,K5),i(e,Qw,p),i(e,Bt,p),s(Bt,J5),s(Bt,Ro),s(Ro,X5),s(Bt,Q5),i(e,ey,p),i(e,Wt,p),s(Wt,e9),s(Wt,Io),s(Io,s9),s(Wt,t9),i(e,sy,p),m(Uo,e,p),i(e,ty,p),i(e,Ft,p),s(Ft,a9),s(Ft,Wf),s(Wf,n9),s(Ft,o9),i(e,ay,p),m(Go,e,p),i(e,ny,p),i(e,Vt,p),s(Vt,l9),s(Vt,Mo),s(Mo,r9),s(Vt,p9),i(e,oy,p),i(e,hi,p),s(hi,i9),i(e,ly,p),i(e,ne,p),s(ne,u9),s(ne,Ff),s(Ff,c9),s(ne,h9),s(ne,Vf),s(Vf,f9),s(ne,d9),s(ne,Yf),s(Yf,m9),s(ne,_9),i(e,ry,p),m(Lo,e,p),i(e,py,p),i(e,Yt,p),s(Yt,v9),s(Yt,fi),s(fi,j9),s(Yt,w9),i(e,iy,p),i(e,di,p),i(e,uy,p),i(e,As,p),s(As,Kt),s(Kt,Kf),m(Zo,Kf,null),s(As,y9),s(As,Jf),s(Jf,g9),i(e,cy,p),i(e,U,p),s(U,b9),s(U,mi),s(mi,q9),s(U,E9),s(U,_i),s(_i,$9),s(U,k9),s(U,Xf),s(Xf,P9),s(U,z9),s(U,Qf),s(Qf,D9),s(U,O9),s(U,vi),s(vi,A9),s(U,T9),i(e,hy,p),i(e,ji,p),s(ji,S9),i(e,fy,p)
,m(No,e,p),i(e,dy,p),i(e,wi,p),s(wi,C9),i(e,my,p),m(Ho,e,p),i(e,_y,p),i(e,yi,p),i(e,vy,p),i(e,Ts,p),s(Ts,Jt),s(Jt,ed),m(Bo,ed,null),s(Ts,x9),s(Ts,sd),s(sd,R9),i(e,jy,p),m(Xt,e,p),i(e,wy,p),i(e,Me,p),s(Me,I9),s(Me,gi),s(gi,U9),s(Me,G9),s(Me,bi),s(bi,M9),s(Me,L9),i(e,yy,p),i(e,Qt,p),s(Qt,Z9),s(Qt,qi),s(qi,N9),s(Qt,H9),i(e,gy,p),i(e,Le,p),s(Le,B9),s(Le,td),s(td,W9),s(Le,F9),s(Le,Ei),s(Ei,V9),s(Le,Y9),i(e,by,p),i(e,$i,p),s($i,K9),i(e,qy,p),i(e,ea,p),s(ea,J9),s(ea,ki),s(ki,X9),s(ea,Q9),i(e,Ey,p),i(e,sa,p),s(sa,ad),s(ad,e8),s(sa,s8),s(sa,Wo),s(Wo,t8),s(Wo,Pi),s(Pi,a8),s(Wo,n8),i(e,$y,p),i(e,oe,p),s(oe,o8),s(oe,nd),s(nd,l8),s(oe,r8),s(oe,zi),s(zi,p8),s(oe,i8),s(oe,od),s(od,u8),s(oe,c8),i(e,ky,p),i(e,Di,p),i(e,Py,p),i(e,Ss,p),s(Ss,ta),s(ta,ld),m(Fo,ld,null),s(Ss,h8),s(Ss,rd),s(rd,f8),i(e,zy,p),i(e,Vo,p),s(Vo,Yo),s(Yo,d8),s(Vo,m8),i(e,Dy,p),i(e,Ze,p),s(Ze,_8),s(Ze,pd),s(pd,v8),s(Ze,j8),s(Ze,Ko),s(Ko,w8),s(Ze,y8),i(e,Oy,p),i(e,aa,p),s(aa,g8),s(aa,Oi),s(Oi,b8),s(aa,q8),i(e,Ay,p),i(e,Ai,p),s(Ai,E8),i(e,Ty,p),i(e,Ti,p),i(e,Sy,p),i(e,Cs,p),s(Cs,na),s(na,id),m(Jo,id,null),s(Cs,$8),s(Cs,ud),s(ud,k8),i(e,Cy,p),i(e,Si,p),s(Si,P8),i(e,xy,p),m(Xo,e,p),i(e,Ry,p),i(e,Ci,p),s(Ci,cd),s(cd,z8),i(e,Iy,p),i(e,Ne,p),s(Ne,xs),s(xs,D8),s(xs,hd),s(hd,O8),s(xs,A8),s(xs,fd),s(fd,T8),s(xs,S8),s(Ne,C8),s(Ne,G),s(G,dd),s(dd,x8),s(G,R8),s(G,md),s(md,I8),s(G,U8),s(G,_d),s(_d,G8),s(G,M8),s(G,vd),s(vd,L8),s(G,Z8),s(G,jd),s(jd,N8),s(G,H8),s(G,wd),s(wd,B8),s(G,W8),s(Ne,F8),s(Ne,yd),s(yd,V8),i(e,Uy,p),i(e,He,p),s(He,Y8),s(He,gd),s(gd,K8),s(He,J8),s(He,bd),s(bd,X8),s(He,Q8),i(e,Gy,p),m(Qo,e,p),i(e,My,p),i(e,xi,p),s(xi,ek),i(e,Ly,p),i(e,Ri,p),i(e,Zy,p),i(e,Rs,p),s(Rs,oa),s(oa,qd),m(el,qd,null),s(Rs,sk),s(Rs,Ed),s(Ed,tk),i(e,Ny,p),i(e,Ii,p),s(Ii,ak),i(e,Hy,p),m(sl,e,p),i(e,By,p),i(e,V,p),s(V,nk),s(V,$d),s($d,ok),s(V,lk),s(V,kd),s(kd,rk),s(V,pk),s(V,Pd),s(Pd,ik),s(V,uk),s(V,zd),s(zd,ck),s(V,hk),i(e,Wy,p),i(e,Be,p),s(Be,fk),s(Be,Dd),s(Dd,dk),s(Be,mk),s(Be,Od),s(Od,_k),s(Be,vk),i(e,Fy,p),i(e,Ui,p),s(Ui,Ad),s(Ad,jk),i(e,Vy,p),i(e,la,p),s(la,tl),s(tl,Td),s(Td,wk),s(tl,yk),s(tl,Sd),s(Sd,gk),s(la,bk),s(la,al),s(al,Cd),s(Cd,qk),s(al,Ek),s(al,xd),s(xd,$k),i(e,Yy,p),i(e,M,p),s(M,kk),s(M,Rd),s(Rd,Pk),s(M,zk),s(M,Id),s(Id,Dk),s(M,Ok),s(M,Ud),s(Ud,Ak),s(M,Tk),s(M,Gd),s(Gd,Sk),s(M,Ck),s(M,Md),s(Md,xk),s(M,Rk),i(e,Ky,p),i(e,ze,p),s(ze,Ld),s(Ld,Ik),s(ze,Uk),s(ze,Zd),s(Zd,Gk),s(ze,Mk),s(ze,Nd),s(Nd,Lk),s(ze,Zk),i(e,Jy,p),i(e,Gi,p),s(Gi,Nk),i(e,Xy,p),i(e,We,p),s(We,nl),s(nl,Hd),s(Hd,Hk),s(nl,Bk),s(nl,Bd),s(Bd,Wk),s(We,Fk),s(We,ol),s(ol,Wd),s(Wd,Vk),s(ol,Yk),s(ol,Fd),s(Fd,Kk),s(We,Jk),s(We,ll),s(ll,Vd),s(Vd,Xk),s(ll,Qk),s(ll,Yd),s(Yd,e7),i(e,Qy,p),i(e,Fe,p),s(Fe,s7),s(Fe,Kd),s(Kd,t7),s(Fe,a7),s(Fe,Mi),s(Mi,n7),s(Fe,o7),i(e,eg,p),i(e,rl,p),s(rl,Jd),s(Jd,l7),s(rl,r7),i(e,sg,p),i(e,le,p),s(le,p7),s(le,Xd),s(Xd,i7),s(le,u7),s(le,Qd),s(Qd,c7),s(le,h7),s(le,em),s(em,f7),s(le,d7),i(e,tg,p),i(e,Li,p),s(Li,pl),s(pl,sm),s(sm,m7),s(pl,_7),s(pl,tm),s(tm,v7),i(e,ag,p),i(e,De,p),s(De,am),s(am,j7),s(De,w7),s(De,nm),s(nm,y7),s(De,g7),s(De,om),s(om,b7),s(De,q7),i(e,ng,p),i(e,Ve,p),s(Ve,E7),s(Ve,lm),s(lm,$7),s(Ve,k7),s(Ve,rm),s(rm,P7),s(Ve,z7),i(e,og,p),i(e,ra,p),s(ra,il),s(il,D7),s(il,pm),s(pm,O7),s(il,A7),s(ra,T7),s(ra,ul),s(ul,S7),s(ul,im),s(im,C7),s(ul,x7),i(e,lg,p),i(e,Zi,p),i(e,rg,p),i(e,Is,p),s(Is,pa),s(pa,um),m(cl,um,null),s(Is,R7),s(Is,cm),s(cm,I7),i(e,pg,p),i(e,Ni,p),s(Ni,U7),i(e,ig,p),i(e,Hi,p),s(Hi,G7),i(e,ug,p),m(hl,e,p),i(e,cg,p),i(e,ia,p),s(ia,M7),s(ia,hm),s(hm,L7),s(ia,Z7),i(e,hg,p),i(e,Ye,p),s(Ye,N7),s(Ye,fl),s(fl,H7),s(Ye,B7),s(Ye,dl),s(dl,W7),s(Ye,F7),i
(e,fg,p),i(e,ua,p),s(ua,V7),s(ua,fm),s(fm,Y7),s(ua,K7),i(e,dg,p),i(e,Ke,p),s(Ke,J7),s(Ke,dm),s(dm,X7),s(Ke,Q7),s(Ke,ml),s(ml,eP),s(Ke,sP),i(e,mg,p),i(e,Bi,p),i(e,_g,p),i(e,Us,p),s(Us,ca),s(ca,mm),m(_l,mm,null),s(Us,tP),s(Us,_m),s(_m,aP),i(e,vg,p),i(e,Wi,p),s(Wi,nP),i(e,jg,p),i(e,Fi,p),s(Fi,oP),i(e,wg,p),i(e,ha,p),s(ha,Gs),s(Gs,lP),s(Gs,vm),s(vm,rP),s(Gs,pP),s(Gs,jm),s(jm,iP),s(Gs,uP),s(ha,cP),s(ha,vl),s(vl,hP),s(vl,wm),s(wm,fP),s(vl,dP),i(e,yg,p),i(e,Je,p),s(Je,mP),s(Je,ym),s(ym,_P),s(Je,vP),s(Je,gm),s(gm,jP),s(Je,wP),i(e,gg,p),i(e,Vi,p),i(e,bg,p),i(e,Ms,p),s(Ms,fa),s(fa,bm),m(jl,bm,null),s(Ms,yP),s(Ms,qm),s(qm,gP),i(e,qg,p),i(e,da,p),s(da,bP),s(da,Em),s(Em,qP),s(da,EP),i(e,Eg,p),m(wl,e,p),i(e,$g,p),i(e,ma,p),s(ma,$P),s(ma,$m),s($m,kP),s(ma,PP),i(e,kg,p),m(yl,e,p),i(e,Pg,p),i(e,Yi,p),i(e,zg,p),i(e,Ls,p),s(Ls,_a),s(_a,km),m(gl,km,null),s(Ls,zP),s(Ls,Pm),s(Pm,DP),i(e,Dg,p),i(e,va,p),s(va,OP),s(va,zm),s(zm,AP),s(va,TP),i(e,Og,p),m(bl,e,p),i(e,Ag,p),i(e,ja,p),s(ja,SP),s(ja,Dm),s(Dm,CP),s(ja,xP),i(e,Tg,p),m(ql,e,p),i(e,Sg,p),i(e,Zs,p),s(Zs,wa),s(wa,Om),m(El,Om,null),s(Zs,RP),s(Zs,Am),s(Am,IP),i(e,Cg,p),i(e,ya,p),s(ya,UP),s(ya,Tm),s(Tm,GP),s(ya,MP),i(e,xg,p),i(e,Ki,p),s(Ki,LP),i(e,Rg,p),i(e,ga,p),s(ga,ZP),s(ga,Sm),s(Sm,NP),s(ga,HP),i(e,Ig,p),i(e,Ji,p),i(e,Ug,p),i(e,Ns,p),s(Ns,ba),s(ba,Cm),m($l,Cm,null),s(Ns,BP),s(Ns,xm),s(xm,WP),i(e,Gg,p),i(e,Xe,p),s(Xe,FP),s(Xe,Rm),s(Rm,VP),s(Xe,YP),s(Xe,kl),s(kl,KP),s(Xe,JP),i(e,Mg,p),i(e,$,p),s($,XP),s($,Im),s(Im,QP),s($,ez),s($,Xi),s(Xi,sz),s($,tz),s($,Um),s(Um,az),s($,nz),s($,Gm),s(Gm,oz),s($,lz),s($,Mm),s(Mm,rz),s($,pz),s($,Lm),s(Lm,iz),s($,uz),s($,Zm),s(Zm,cz),s($,hz),s($,Nm),s(Nm,fz),s($,dz),i(e,Lg,p),i(e,Qe,p),s(Qe,mz),s(Qe,Hm),s(Hm,_z),s(Qe,vz),s(Qe,Bm),s(Bm,jz),s(Qe,wz),i(e,Zg,p),m(Pl,e,p),i(e,Ng,p),i(e,Qi,p),s(Qi,yz),i(e,Hg,p),i(e,re,p),s(re,zl),s(zl,Wm),s(Wm,gz),s(zl,bz),s(zl,Fm),s(Fm,qz),s(re,Ez),s(re,Dl),s(Dl,Vm),s(Vm,$z),s(Dl,kz),s(Dl,Ym),s(Ym,Pz),s(re,zz),s(re,Ol),s(Ol,Km),s(Km,Dz),s(Ol,Oz),s(Ol,Jm),s(Jm,Az),s(re,Tz),s(re,Al),s(Al,Xm),s(Xm,Sz),s(Al,Cz),s(Al,Qm),s(Qm,xz),i(e,Bg,p),i(e,eu,p),s(eu,Rz),i(e,Wg,p),i(e,su,p),s(su,Iz),i(e,Fg,p),m(Tl,e,p),i(e,Vg,p),i(e,qa,p),s(qa,Uz),s(qa,tu),s(tu,Gz),s(qa,Mz),i(e,Yg,p),i(e,au,p),s(au,Lz),i(e,Kg,p),m(Sl,e,p),i(e,Jg,p),i(e,pe,p),s(pe,Zz),s(pe,e_),s(e_,Nz),s(pe,Hz),s(pe,s_),s(s_,Bz),s(pe,Wz),s(pe,t_),s(t_,Fz),s(pe,Vz),i(e,Xg,p),i(e,nu,p),i(e,Qg,p),i(e,Hs,p),s(Hs,Ea),s(Ea,a_),m(Cl,a_,null),s(Hs,Yz),s(Hs,n_),s(n_,Kz),i(e,e2,p),i(e,L,p),s(L,Jz),s(L,o_),s(o_,Xz),s(L,Qz),s(L,l_),s(l_,eD),s(L,sD),s(L,r_),s(r_,tD),s(L,aD),s(L,p_),s(p_,nD),s(L,oD),s(L,xl),s(xl,lD),s(L,rD),i(e,s2,p),i(e,ou,p),s(ou,pD),i(e,t2,p),i(e,$a,p),s($a,Rl),s(Rl,i_),s(i_,iD),s(Rl,uD),s(Rl,u_),s(u_,cD),s($a,hD),s($a,es),s(es,c_),s(c_,fD),s(es,dD),s(es,h_),s(h_,mD),s(es,_D),s(es,f_),s(f_,vD),s(es,jD),i(e,a2,p),i(e,D,p),s(D,wD),s(D,d_),s(d_,yD),s(D,gD),s(D,lu),s(lu,bD),s(D,qD),s(D,m_),s(m_,ED),s(D,$D),s(D,__),s(__,kD),s(D,PD),s(D,v_),s(v_,zD),s(D,DD),s(D,j_),s(j_,OD),s(D,AD),i(e,n2,p),i(e,ss,p),s(ss,TD),s(ss,w_),s(w_,SD),s(ss,CD),s(ss,y_),s(y_,xD),s(ss,RD),i(e,o2,p),m(Il,e,p),i(e,l2,p),i(e,ts,p),s(ts,ID),s(ts,g_),s(g_,UD),s(ts,GD),s(ts,ru),s(ru,MD),s(ts,LD),i(e,r2,p),i(e,ie,p),s(ie,ka),s(ka,b_),s(b_,ZD),s(ka,ND),s(ka,q_),s(q_,HD),s(ka,BD),s(ie,WD),s(ie,Pa),s(Pa,E_),s(E_,FD),s(Pa,VD),s(Pa,$_),s($_,YD),s(Pa,KD),s(ie,JD),s(ie,as),s(as,k_),s(k_,XD),s(as,QD),s(as,P_),s(P_,eO),s(as,sO),s(as,z_),s(z_,tO),s(as,aO),s(ie,nO),s(ie,ns),s(ns,D_),s(D_,oO),s(ns,lO),s(ns,O_),s(O_,rO),s(ns,pO),s(ns,A_),s(A_,iO),s(ns,uO),i(e,p2,p),i(e,pu,p),s(pu,cO),i(e,i2,p),m(Ul
,e,p),i(e,u2,p),i(e,za,p),s(za,hO),s(za,iu),s(iu,fO),s(za,dO),i(e,c2,p),i(e,Da,p),s(Da,mO),s(Da,T_),s(T_,_O),s(Da,vO),i(e,h2,p),m(Gl,e,p),i(e,f2,p),i(e,Y,p),s(Y,jO),s(Y,S_),s(S_,wO),s(Y,yO),s(Y,C_),s(C_,gO),s(Y,bO),s(Y,x_),s(x_,qO),s(Y,EO),s(Y,R_),s(R_,$O),s(Y,kO),i(e,d2,p),i(e,uu,p),i(e,m2,p),i(e,Bs,p),s(Bs,Oa),s(Oa,I_),m(Ml,I_,null),s(Bs,PO),s(Bs,U_),s(U_,zO),i(e,_2,p),i(e,cu,p),s(cu,DO),i(e,v2,p),i(e,Aa,p),s(Aa,OO),s(Aa,G_),s(G_,AO),s(Aa,TO),i(e,j2,p),m(Ll,e,p),i(e,w2,p),i(e,Ta,p),s(Ta,SO),s(Ta,Zl),s(Zl,CO),s(Ta,xO),i(e,y2,p),i(e,ue,p),s(ue,RO),s(ue,M_),s(M_,IO),s(ue,UO),s(ue,L_),s(L_,GO),s(ue,MO),s(ue,Z_),s(Z_,LO),s(ue,ZO),i(e,g2,p),i(e,hu,p),i(e,b2,p),i(e,Ws,p),s(Ws,Sa),s(Sa,N_),m(Nl,N_,null),s(Ws,NO),s(Ws,H_),s(H_,HO),i(e,q2,p),i(e,fu,p),s(fu,BO),i(e,E2,p),i(e,Fs,p),s(Fs,Ca),s(Ca,B_),m(Hl,B_,null),s(Fs,WO),s(Fs,W_),s(W_,FO),i(e,$2,p),i(e,du,p),s(du,VO),i(e,k2,p),m(Bl,e,p),i(e,P2,p),i(e,os,p),s(os,YO),s(os,mu),s(mu,KO),s(os,JO),s(os,F_),s(F_,XO),s(os,QO),i(e,z2,p),i(e,ls,p),s(ls,eA),s(ls,V_),s(V_,sA),s(ls,tA),s(ls,Y_),s(Y_,aA),s(ls,nA),i(e,D2,p),i(e,_u,p),s(_u,oA),i(e,O2,p),m(Wl,e,p),i(e,A2,p),i(e,xa,p),s(xa,lA),s(xa,vu),s(vu,rA),s(xa,pA),i(e,T2,p),i(e,Ra,p),s(Ra,iA),s(Ra,Fl),s(Fl,uA),s(Ra,cA),i(e,S2,p),i(e,Vs,p),s(Vs,Ia),s(Ia,K_),m(Vl,K_,null),s(Vs,hA),s(Vs,J_),s(J_,fA),i(e,C2,p),i(e,ju,p),s(ju,dA),i(e,x2,p),m(Yl,e,p),i(e,R2,p),i(e,wu,p),s(wu,mA),i(e,I2,p),i(e,rs,p),s(rs,_A),s(rs,X_),s(X_,vA),s(rs,jA),s(rs,Q_),s(Q_,wA),s(rs,yA),i(e,U2,p),i(e,yu,p),s(yu,gA),i(e,G2,p),m(Kl,e,p),i(e,M2,p),m(Ua,e,p),i(e,L2,p),i(e,Ys,p),s(Ys,Ga),s(Ga,e1),m(Jl,e1,null),s(Ys,bA),s(Ys,s1),s(s1,qA),i(e,Z2,p),i(e,gu,p),s(gu,EA),i(e,N2,p),m(Xl,e,p),i(e,H2,p),i(e,ce,p),s(ce,$A),s(ce,bu),s(bu,kA),s(ce,PA),s(ce,t1),s(t1,zA),s(ce,DA),s(ce,a1),s(a1,OA),s(ce,AA),i(e,B2,p),i(e,Ma,p),s(Ma,TA),s(Ma,n1),s(n1,SA),s(Ma,CA),i(e,W2,p),i(e,qu,p),s(qu,xA),i(e,F2,p),m(Ql,e,p),i(e,V2,p),i(e,La,p),s(La,RA),s(La,Eu),s(Eu,IA),s(La,UA),i(e,Y2,p),i(e,Za,p),s(Za,GA),s(Za,er),s(er,MA),s(Za,LA),i(e,K2,p),i(e,$u,p),i(e,J2,p),i(e,Ks,p),s(Ks,Na),s(Na,o1),m(sr,o1,null),s(Ks,ZA),s(Ks,l1),s(l1,NA),i(e,X2,p),i(e,ku,p),s(ku,HA),i(e,Q2,p),m(tr,e,p),i(e,eb,p),i(e,Z,p),s(Z,BA),s(Z,Pu),s(Pu,WA),s(Z,FA),s(Z,r1),s(r1,VA),s(Z,YA),s(Z,p1),s(p1,KA),s(Z,JA),s(Z,i1),s(i1,XA),s(Z,QA),s(Z,u1),s(u1,eT),s(Z,sT),i(e,sb,p),i(e,zu,p),s(zu,tT),i(e,tb,p),m(ar,e,p),i(e,ab,p),i(e,Ha,p),s(Ha,aT),s(Ha,Du),s(Du,nT),s(Ha,oT),i(e,nb,p),i(e,Ou,p),i(e,ob,p),i(e,Js,p),s(Js,Ba),s(Ba,c1),m(nr,c1,null),s(Js,lT),s(Js,h1),s(h1,rT),i(e,lb,p),i(e,Au,p),s(Au,pT),i(e,rb,p),m(or,e,p),i(e,pb,p),i(e,ps,p),s(ps,iT),s(ps,Tu),s(Tu,uT),s(ps,cT),s(ps,f1),s(f1,hT),s(ps,fT),i(e,ib,p),i(e,Su,p),s(Su,dT),i(e,ub,p),m(lr,e,p),i(e,cb,p),i(e,Wa,p),s(Wa,mT),s(Wa,Cu),s(Cu,_T),s(Wa,vT),i(e,hb,p),i(e,xu,p),i(e,fb,p),i(e,Xs,p),s(Xs,Fa),s(Fa,d1),m(rr,d1,null),s(Xs,jT),s(Xs,m1),s(m1,wT),i(e,db,p),i(e,Ru,p),s(Ru,yT),i(e,mb,p),m(pr,e,p),i(e,_b,p),i(e,is,p),s(is,gT),s(is,Iu),s(Iu,bT),s(is,qT),s(is,_1),s(_1,ET),s(is,$T),i(e,vb,p),i(e,Uu,p),s(Uu,kT),i(e,jb,p),m(ir,e,p),i(e,wb,p),i(e,Va,p),s(Va,PT),s(Va,Gu),s(Gu,zT),s(Va,DT),i(e,yb,p),i(e,Mu,p),i(e,gb,p),i(e,Qs,p),s(Qs,Ya),s(Ya,v1),m(ur,v1,null),s(Qs,OT),s(Qs,j1),s(j1,AT),i(e,bb,p),i(e,Ka,p),s(Ka,TT),s(Ka,w1),s(w1,ST),s(Ka,CT),i(e,qb,p),i(e,Lu,p),s(Lu,y1),s(y1,xT),i(e,Eb,p),i(e,Ja,p),s(Ja,RT),s(Ja,g1),s(g1,IT),s(Ja,UT),i(e,$b,p),i(e,O,p),s(O,GT),s(O,b1),s(b1,MT),s(O,LT),s(O,q1),s(q1,ZT),s(O,NT),s(O,E1),s(E1,HT),s(O,BT),s(O,$1),s($1,WT),s(O,FT),s(O,k1),s(k1,VT),s(O,YT),s(O,P1),s(P1,KT),s(O,JT),i(e,kb,p),m(cr,e,p),i(e,Pb,p),i(e,Zu,p),s(Zu,z1),s(z1,XT),i(e,zb,p),i(
e,Xa,p),s(Xa,QT),s(Xa,hr),s(hr,eS),s(Xa,sS),i(e,Db,p),i(e,Nu,p),s(Nu,D1),s(D1,tS),i(e,Ob,p),i(e,Hu,p),s(Hu,aS),i(e,Ab,p),i(e,Bu,p),s(Bu,nS),i(e,Tb,p),m(fr,e,p),i(e,Sb,p),i(e,us,p),s(us,oS),s(us,O1),s(O1,lS),s(us,rS),s(us,A1),s(A1,pS),s(us,iS),i(e,Cb,p),m(dr,e,p),i(e,xb,p),m(Qa,e,p),i(e,Rb,p),i(e,en,p),s(en,uS),s(en,T1),s(T1,cS),s(en,hS),i(e,Ib,p),i(e,sn,p),s(sn,fS),s(sn,S1),s(S1,dS),s(sn,mS),i(e,Ub,p),m(mr,e,p),i(e,Gb,p),i(e,Wu,p),s(Wu,C1),s(C1,_S),i(e,Mb,p),i(e,cs,p),s(cs,vS),s(cs,x1),s(x1,jS),s(cs,wS),s(cs,R1),s(R1,yS),s(cs,gS),i(e,Lb,p),i(e,Fu,p),s(Fu,bS),i(e,Zb,p),m(_r,e,p),i(e,Nb,p),i(e,tn,p),s(tn,qS),s(tn,I1),s(I1,ES),s(tn,$S),i(e,Hb,p),m(vr,e,p),i(e,Bb,p),i(e,an,p),s(an,kS),s(an,U1),s(U1,PS),s(an,zS),i(e,Wb,p),i(e,Vu,p),s(Vu,DS),i(e,Fb,p),i(e,jr,p),s(jr,G1),s(G1,OS),s(jr,AS),i(e,Vb,p),i(e,hs,p),s(hs,TS),s(hs,M1),s(M1,SS),s(hs,CS),s(hs,L1),s(L1,xS),s(hs,RS),i(e,Yb,p),i(e,Yu,p),s(Yu,IS),i(e,Kb,p),i(e,et,p),s(et,nn),s(nn,Z1),m(wr,Z1,null),s(et,US),s(et,N1),s(N1,GS),i(e,Jb,p),i(e,Ku,p),s(Ku,MS),i(e,Xb,p),i(e,Ju,p),s(Ju,LS),i(e,Qb,p),i(e,Xu,p),s(Xu,ZS),i(e,e3,p),i(e,st,p),s(st,on),s(on,H1),m(yr,H1,null),s(st,NS),s(st,B1),s(B1,HS),i(e,s3,p),i(e,ln,p),s(ln,BS),s(ln,W1),s(W1,WS),s(ln,FS),i(e,t3,p),m(gr,e,p),i(e,a3,p),i(e,Qu,p),s(Qu,VS),i(e,n3,p),i(e,P,p),s(P,YS),s(P,F1),s(F1,KS),s(P,JS),s(P,V1),s(V1,XS),s(P,QS),s(P,Y1),s(Y1,eC),s(P,sC),s(P,ec),s(ec,tC),s(P,aC),s(P,sc),s(sc,nC),s(P,oC),s(P,K1),s(K1,lC),s(P,rC),s(P,J1),s(J1,pC),s(P,iC),i(e,o3,p),m(br,e,p),i(e,l3,p),i(e,rn,p),s(rn,uC),s(rn,X1),s(X1,cC),s(rn,hC),i(e,r3,p),i(e,tc,p),s(tc,fC),i(e,p3,p),i(e,pn,p),s(pn,dC),s(pn,qr),s(qr,mC),s(pn,_C),i(e,i3,p),i(e,he,p),s(he,vC),s(he,Q1),s(Q1,jC),s(he,wC),s(he,ev),s(ev,yC),s(he,gC),s(he,ac),s(ac,bC),s(he,qC),i(e,u3,p),i(e,tt,p),s(tt,un),s(un,sv),m(Er,sv,null),s(tt,EC),s(tt,tv),s(tv,$C),i(e,c3,p),i(e,$r,p),s($r,kC),s($r,kr),s(kr,PC),i(e,h3,p),i(e,cn,p),s(cn,zC),s(cn,av),s(av,DC),s(cn,OC),i(e,f3,p),i(e,nc,p),s(nc,AC),i(e,d3,p),m(Pr,e,p),i(e,m3,p),i(e,fs,p),s(fs,TC),s(fs,nv),s(nv,SC),s(fs,CC),s(fs,ov),s(ov,xC),s(fs,RC),i(e,_3,p),i(e,oc,p),i(e,v3,p),i(e,at,p),s(at,hn),s(hn,lv),m(zr,lv,null),s(at,IC),s(at,rv),s(rv,UC),i(e,j3,p),i(e,lc,p),s(lc,GC),i(e,w3,p),i(e,fn,p),s(fn,MC),s(fn,rc),s(rc,LC),s(fn,ZC),i(e,y3,p),m(Dr,e,p),i(e,g3,p),i(e,pc,p),s(pc,NC),i(e,b3,p),i(e,dn,p),s(dn,HC),s(dn,pv),s(pv,BC),s(dn,WC),i(e,q3,p),m(Or,e,p),i(e,E3,p),i(e,ic,p),s(ic,FC),i(e,$3,p),i(e,uc,p),s(uc,VC),i(e,k3,p),i(e,nt,p),s(nt,mn),s(mn,iv),m(Ar,iv,null),s(nt,YC),s(nt,uv),s(uv,KC),i(e,P3,p),i(e,cc,p),s(cc,JC),i(e,z3,p),i(e,hc,p),s(hc,XC),i(e,D3,p),m(Tr,e,p),i(e,O3,p),i(e,fc,p),s(fc,QC),i(e,A3,p),i(e,dc,p),s(dc,ex),i(e,T3,p),i(e,mc,p),s(mc,sx),i(e,S3,p),i(e,_c,p),s(_c,tx),i(e,C3,p),m(Sr,e,p),i(e,x3,p),i(e,vc,p),s(vc,ax),i(e,R3,p),i(e,_n,p),s(_n,nx),s(_n,Cr),s(Cr,ox),s(_n,lx),i(e,I3,p),i(e,ot,p),s(ot,vn),s(vn,cv),m(xr,cv,null),s(ot,rx),s(ot,hv),s(hv,px),i(e,U3,p),i(e,jc,p),s(jc,ix),i(e,G3,p),i(e,wc,p),s(wc,ux),i(e,M3,p),i(e,N,p),s(N,fv),s(fv,dv),s(dv,cx),s(N,hx),s(N,mv),s(mv,Oe),s(Oe,fx),s(Oe,yc),s(yc,dx),s(Oe,mx),s(Oe,gc),s(gc,_x),s(Oe,vx),s(Oe,bc),s(bc,jx),s(Oe,wx),s(N,yx),s(N,Rr),s(Rr,_v),s(_v,gx),s(Rr,bx),m(Ir,Rr,null),s(N,qx),s(N,vv),s(vv,Ur),s(Ur,Ex),s(Ur,Gr),s(Gr,$x),s(Ur,kx),s(N,Px),s(N,jv),s(jv,wv),s(wv,zx),s(N,Dx),s(N,yv),s(yv,Mr),s(Mr,Ox),s(Mr,Lr),s(Lr,Ax),s(Mr,Tx),i(e,L3,p),i(e,qc,p),s(qc,Sx),i(e,Z3,p),i(e,jn,p),s(jn,lt),s(lt,gv),s(gv,Cx),s(lt,xx),s(lt,bv),s(bv,Rx),s(lt,Ix),s(lt,qv),s(qv,Ux),s(jn,Gx),s(jn,Ev),s(Ev,Zr),s(Zr,Mx),s(Zr,Nr),s(Nr,Lx),s(Zr,Zx),i(e,N3,p),i(e,rt,p),s(rt,wn),s(wn,$v),m(Hr,$v,null),s(rt,Nx),s(rt
,kv),s(kv,Hx),i(e,H3,p),i(e,pt,p),s(pt,yn),s(yn,Pv),m(Br,Pv,null),s(pt,Bx),s(pt,Wr),s(Wr,Wx),s(Wr,zv),s(zv,Fx),s(Wr,Vx),i(e,B3,p),i(e,H,p),s(H,Yx),s(H,Dv),s(Dv,Kx),s(H,Jx),s(H,Ov),s(Ov,Xx),s(H,Qx),s(H,Av),s(Av,eR),s(H,sR),s(H,Tv),s(Tv,tR),s(H,aR),s(H,Fr),s(Fr,nR),s(H,oR),i(e,W3,p),i(e,it,p),s(it,gn),s(gn,Sv),m(Vr,Sv,null),s(it,lR),s(it,Ec),s(Ec,rR),s(Ec,Cv),s(Cv,pR),i(e,F3,p),i(e,$c,p),s($c,iR),i(e,V3,p),i(e,kc,p),s(kc,uR),i(e,Y3,p),m(Yr,e,p),i(e,K3,p),i(e,bn,p),s(bn,cR),s(bn,xv),s(xv,hR),s(bn,fR),i(e,J3,p),m(Kr,e,p),i(e,X3,p),i(e,Pc,p),s(Pc,dR),i(e,Q3,p),i(e,zc,p),s(zc,mR),i(e,e0,p),i(e,ds,p),s(ds,_R),s(ds,Rv),s(Rv,vR),s(ds,jR),s(ds,Iv),s(Iv,wR),s(ds,yR),i(e,s0,p),i(e,ut,p),s(ut,qn),s(qn,Uv),m(Jr,Uv,null),s(ut,gR),s(ut,Gv),s(Gv,bR),i(e,t0,p),i(e,ms,p),s(ms,ct),s(ct,qR),s(ct,Dc),s(Dc,ER),s(ct,$R),s(ct,Mv),s(Mv,kR),s(ct,PR),s(ms,zR),s(ms,Xr),s(Xr,DR),s(Xr,Qr),s(Qr,OR),s(Xr,AR),s(ms,TR),s(ms,ht),s(ht,SR),s(ht,Oc),s(Oc,CR),s(ht,xR),s(ht,ep),s(ep,RR),s(ht,IR),i(e,a0,p),i(e,ft,p),s(ft,En),s(En,Lv),m(sp,Lv,null),s(ft,UR),s(ft,Zv),s(Zv,GR),i(e,n0,p),i(e,fe,p),s(fe,MR),s(fe,Ac),s(Ac,LR),s(fe,ZR),s(fe,Tc),s(Tc,NR),s(fe,HR),s(fe,Nv),s(Nv,BR),s(fe,WR),i(e,o0,p),i(e,$n,p),s($n,FR),s($n,Sc),s(Sc,VR),s($n,YR),i(e,l0,p),i(e,_s,p),s(_s,KR),s(_s,Cc),s(Cc,JR),s(_s,XR),s(_s,xc),s(xc,QR),s(_s,eI),i(e,r0,p),i(e,kn,p),s(kn,sI),s(kn,Hv),s(Hv,tI),s(kn,aI),i(e,p0,p),i(e,Rc,p),s(Rc,nI),i(e,i0,p),m(tp,e,p),i(e,u0,p),i(e,Ic,p),s(Ic,oI),i(e,c0,p),m(ap,e,p),i(e,h0,p),i(e,de,p),s(de,lI),s(de,Uc),s(Uc,rI),s(de,pI),s(de,np),s(np,iI),s(de,uI),s(de,Bv),s(Bv,cI),s(de,hI),i(e,f0,p),i(e,dt,p),s(dt,Pn),s(Pn,Wv),m(op,Wv,null),s(dt,fI),s(dt,Fv),s(Fv,dI),i(e,d0,p),i(e,ee,p),m(lp,ee,null),s(ee,mI),s(ee,Vv),s(Vv,_I),s(ee,vI),s(ee,Ae),s(Ae,jI),s(Ae,Yv),s(Yv,wI),s(Ae,yI),s(Ae,Kv),s(Kv,gI),s(Ae,bI),s(Ae,Jv),s(Jv,qI),s(Ae,EI),s(ee,$I),s(ee,me),s(me,Gc),s(Gc,kI),s(me,PI),s(me,Xv),s(Xv,zI),s(me,DI),s(me,Mc),s(Mc,OI),s(me,AI),s(me,Qv),s(Qv,TI),s(me,SI),i(e,m0,p),i(e,mt,p),s(mt,zn),s(zn,ej),m(rp,ej,null),s(mt,CI),s(mt,sj),s(sj,xI),i(e,_0,p),i(e,Dn,p),s(Dn,RI),s(Dn,Lc),s(Lc,II),s(Dn,UI),i(e,v0,p),i(e,Zc,p),s(Zc,GI),i(e,j0,p),i(e,Nc,p),s(Nc,MI),i(e,w0,p),i(e,Hc,p),s(Hc,LI),i(e,y0,p),i(e,On,p),s(On,tj),s(tj,ZI),s(On,NI),s(On,aj),s(aj,HI),i(e,g0,p),m(pp,e,p),i(e,b0,p),i(e,An,p),s(An,BI),s(An,nj),s(nj,WI),s(An,FI),i(e,q0,p),m(ip,e,p),i(e,E0,p),i(e,Bc,p),s(Bc,VI),i(e,$0,p),i(e,_t,p),s(_t,Tn),s(Tn,oj),m(up,oj,null),s(_t,YI),s(_t,lj),s(lj,KI),i(e,k0,p),i(e,_e,p),s(_e,rj),s(rj,cp),s(cp,JI),s(_e,XI),s(_e,pj),s(pj,hp),s(hp,QI),s(_e,eU),s(_e,ij),s(ij,fp),s(fp,sU),s(_e,tU),s(_e,uj),s(uj,dp),s(dp,aU),i(e,P0,p),i(e,Wc,p),s(Wc,nU),i(e,z0,p),i(e,vs,p),s(vs,cj),s(cj,mp),s(mp,oU),s(vs,lU),s(vs,hj),s(hj,_p),s(_p,rU),s(vs,pU),s(vs,fj),s(fj,vp),s(vp,iU),i(e,D0,p),i(e,js,p),s(js,uU),s(js,Fc),s(Fc,cU),s(js,hU),s(js,jp),s(jp,fU),s(js,dU),O0=!0},p(e,[p]){const wp={};p&2&&(wp.$$scope={dirty:p,ctx:e}),Xt.$set(wp);const dj={};p&2&&(dj.$$scope={dirty:p,ctx:e}),Ua.$set(dj);const 
mj={};p&2&&(mj.$$scope={dirty:p,ctx:e}),Qa.$set(mj)},i(e){O0||(_(z.$$.fragment,e),_(so.$$.fragment,e),_(to.$$.fragment,e),_(ao.$$.fragment,e),_(no.$$.fragment,e),_(ro.$$.fragment,e),_(po.$$.fragment,e),_(io.$$.fragment,e),_(co.$$.fragment,e),_(ho.$$.fragment,e),_(fo.$$.fragment,e),_(_o.$$.fragment,e),_(jo.$$.fragment,e),_(wo.$$.fragment,e),_(go.$$.fragment,e),_(qo.$$.fragment,e),_(Eo.$$.fragment,e),_(ko.$$.fragment,e),_(Do.$$.fragment,e),_(Oo.$$.fragment,e),_(Ao.$$.fragment,e),_(To.$$.fragment,e),_(So.$$.fragment,e),_(Co.$$.fragment,e),_(xo.$$.fragment,e),_(Uo.$$.fragment,e),_(Go.$$.fragment,e),_(Lo.$$.fragment,e),_(Zo.$$.fragment,e),_(No.$$.fragment,e),_(Ho.$$.fragment,e),_(Bo.$$.fragment,e),_(Xt.$$.fragment,e),_(Fo.$$.fragment,e),_(Jo.$$.fragment,e),_(Xo.$$.fragment,e),_(Qo.$$.fragment,e),_(el.$$.fragment,e),_(sl.$$.fragment,e),_(cl.$$.fragment,e),_(hl.$$.fragment,e),_(_l.$$.fragment,e),_(jl.$$.fragment,e),_(wl.$$.fragment,e),_(yl.$$.fragment,e),_(gl.$$.fragment,e),_(bl.$$.fragment,e),_(ql.$$.fragment,e),_(El.$$.fragment,e),_($l.$$.fragment,e),_(Pl.$$.fragment,e),_(Tl.$$.fragment,e),_(Sl.$$.fragment,e),_(Cl.$$.fragment,e),_(Il.$$.fragment,e),_(Ul.$$.fragment,e),_(Gl.$$.fragment,e),_(Ml.$$.fragment,e),_(Ll.$$.fragment,e),_(Nl.$$.fragment,e),_(Hl.$$.fragment,e),_(Bl.$$.fragment,e),_(Wl.$$.fragment,e),_(Vl.$$.fragment,e),_(Yl.$$.fragment,e),_(Kl.$$.fragment,e),_(Ua.$$.fragment,e),_(Jl.$$.fragment,e),_(Xl.$$.fragment,e),_(Ql.$$.fragment,e),_(sr.$$.fragment,e),_(tr.$$.fragment,e),_(ar.$$.fragment,e),_(nr.$$.fragment,e),_(or.$$.fragment,e),_(lr.$$.fragment,e),_(rr.$$.fragment,e),_(pr.$$.fragment,e),_(ir.$$.fragment,e),_(ur.$$.fragment,e),_(cr.$$.fragment,e),_(fr.$$.fragment,e),_(dr.$$.fragment,e),_(Qa.$$.fragment,e),_(mr.$$.fragment,e),_(_r.$$.fragment,e),_(vr.$$.fragment,e),_(wr.$$.fragment,e),_(yr.$$.fragment,e),_(gr.$$.fragment,e),_(br.$$.fragment,e),_(Er.$$.fragment,e),_(Pr.$$.fragment,e),_(zr.$$.fragment,e),_(Dr.$$.fragment,e),_(Or.$$.fragment,e),_(Ar.$$.fragment,e),_(Tr.$$.fragment,e),_(Sr.$$.fragment,e),_(xr.$$.fragment,e),_(Ir.$$.fragment,e),_(Hr.$$.fragment,e),_(Br.$$.fragment,e),_(Vr.$$.fragment,e),_(Yr.$$.fragment,e),_(Kr.$$.fragment,e),_(Jr.$$.fragment,e),_(sp.$$.fragment,e),_(tp.$$.fragment,e),_(ap.$$.fragment,e),_(op.$$.fragment,e),_(lp.$$.fragment,e),_(rp.$$.fragment,e),_(pp.$$.fragment,e),_(ip.$$.fragment,e),_(up.$$.fragment,e),O0=!0)},o(e){v(z.$$.fragment,e),v(so.$$.fragment,e),v(to.$$.fragment,e),v(ao.$$.fragment,e),v(no.$$.fragment,e),v(ro.$$.fragment,e),v(po.$$.fragment,e),v(io.$$.fragment,e),v(co.$$.fragment,e),v(ho.$$.fragment,e),v(fo.$$.fragment,e),v(_o.$$.fragment,e),v(jo.$$.fragment,e),v(wo.$$.fragment,e),v(go.$$.fragment,e),v(qo.$$.fragment,e),v(Eo.$$.fragment,e),v(ko.$$.fragment,e),v(Do.$$.fragment,e),v(Oo.$$.fragment,e),v(Ao.$$.fragment,e),v(To.$$.fragment,e),v(So.$$.fragment,e),v(Co.$$.fragment,e),v(xo.$$.fragment,e),v(Uo.$$.fragment,e),v(Go.$$.fragment,e),v(Lo.$$.fragment,e),v(Zo.$$.fragment,e),v(No.$$.fragment,e),v(Ho.$$.fragment,e),v(Bo.$$.fragment,e),v(Xt.$$.fragment,e),v(Fo.$$.fragment,e),v(Jo.$$.fragment,e),v(Xo.$$.fragment,e),v(Qo.$$.fragment,e),v(el.$$.fragment,e),v(sl.$$.fragment,e),v(cl.$$.fragment,e),v(hl.$$.fragment,e),v(_l.$$.fragment,e),v(jl.$$.fragment,e),v(wl.$$.fragment,e),v(yl.$$.fragment,e),v(gl.$$.fragment,e),v(bl.$$.fragment,e),v(ql.$$.fragment,e),v(El.$$.fragment,e),v($l.$$.fragment,e),v(Pl.$$.fragment,e),v(Tl.$$.fragment,e),v(Sl.$$.fragment,e),v(Cl.$$.fragment,e),v(Il.$$.fragment,e),v(Ul.$$.fragment,e),v(Gl.$$.fragment,e),v(Ml.$$.fragment,e),v
(Ll.$$.fragment,e),v(Nl.$$.fragment,e),v(Hl.$$.fragment,e),v(Bl.$$.fragment,e),v(Wl.$$.fragment,e),v(Vl.$$.fragment,e),v(Yl.$$.fragment,e),v(Kl.$$.fragment,e),v(Ua.$$.fragment,e),v(Jl.$$.fragment,e),v(Xl.$$.fragment,e),v(Ql.$$.fragment,e),v(sr.$$.fragment,e),v(tr.$$.fragment,e),v(ar.$$.fragment,e),v(nr.$$.fragment,e),v(or.$$.fragment,e),v(lr.$$.fragment,e),v(rr.$$.fragment,e),v(pr.$$.fragment,e),v(ir.$$.fragment,e),v(ur.$$.fragment,e),v(cr.$$.fragment,e),v(fr.$$.fragment,e),v(dr.$$.fragment,e),v(Qa.$$.fragment,e),v(mr.$$.fragment,e),v(_r.$$.fragment,e),v(vr.$$.fragment,e),v(wr.$$.fragment,e),v(yr.$$.fragment,e),v(gr.$$.fragment,e),v(br.$$.fragment,e),v(Er.$$.fragment,e),v(Pr.$$.fragment,e),v(zr.$$.fragment,e),v(Dr.$$.fragment,e),v(Or.$$.fragment,e),v(Ar.$$.fragment,e),v(Tr.$$.fragment,e),v(Sr.$$.fragment,e),v(xr.$$.fragment,e),v(Ir.$$.fragment,e),v(Hr.$$.fragment,e),v(Br.$$.fragment,e),v(Vr.$$.fragment,e),v(Yr.$$.fragment,e),v(Kr.$$.fragment,e),v(Jr.$$.fragment,e),v(sp.$$.fragment,e),v(tp.$$.fragment,e),v(ap.$$.fragment,e),v(op.$$.fragment,e),v(lp.$$.fragment,e),v(rp.$$.fragment,e),v(pp.$$.fragment,e),v(ip.$$.fragment,e),v(up.$$.fragment,e),O0=!1},d(e){t(g),e&&t(S),e&&t(b),j(z),e&&t(te),e&&t(T),e&&t(wj),e&&t(R),e&&t(yj),e&&t(Te),e&&t(gj),e&&t(kp),e&&t(bj),e&&t(Pp),e&&t(qj),e&&t(wt),e&&t(Ej),e&&t(yt),e&&t($j),e&&t(Ap),e&&t(kj),e&&t(Tp),e&&t(Pj),e&&t(Sp),e&&t(zj),e&&t(Cp),e&&t(Dj),e&&t(xp),e&&t(Oj),e&&t(Ip),e&&t(Aj),e&&t(Up),e&&t(Tj),e&&t(qs),j(so),e&&t(Sj),e&&t(Gp),e&&t(Cj),e&&t(Es),j(to),e&&t(xj),e&&t(Mp),e&&t(Rj),j(ao,e),e&&t(Ij),e&&t(Se),e&&t(Uj),j(no,e),e&&t(Gj),e&&t(Ce),e&&t(Mj),e&&t(qt),e&&t(Lj),e&&t(Zp),e&&t(Zj),e&&t(Np),e&&t(Nj),j(ro,e),e&&t(Hj),e&&t(xe),e&&t(Bj),e&&t(Et),e&&t(Wj),j(po,e),e&&t(Fj),e&&t(ke),e&&t(Vj),e&&t(Hp),e&&t(Yj),j(io,e),e&&t(Kj),e&&t(Re),e&&t(Jj),e&&t($t),e&&t(Xj),e&&t(Ie),e&&t(Qj),e&&t(Bp),e&&t(ew),j(co,e),e&&t(sw),e&&t(Wp),e&&t(tw),j(ho,e),e&&t(aw),e&&t(Fp),e&&t(nw),j(fo,e),e&&t(ow),e&&t(kt),e&&t(lw),e&&t(Pt),e&&t(rw),e&&t(zt),e&&t(pw),e&&t(Vp),e&&t(iw),e&&t($s),j(_o),e&&t(uw),e&&t(Ot),e&&t(cw),e&&t(At),e&&t(hw),e&&t(Kp),e&&t(fw),j(jo,e),e&&t(dw),e&&t(Jp),e&&t(mw),j(wo,e),e&&t(_w),e&&t(I),e&&t(vw),e&&t(ae),e&&t(jw),e&&t(Tt),e&&t(ww),j(go,e),e&&t(yw),e&&t(St),e&&t(gw),e&&t(Ct),e&&t(bw),e&&t(Xp),e&&t(qw),e&&t(Ps),j(qo),e&&t(Ew),e&&t(Rt),e&&t($w),j(Eo,e),e&&t(kw),e&&t(Ue),e&&t(Pw),e&&t(ei),e&&t(zw),e&&t(It),e&&t(Dw),e&&t(si),e&&t(Ow),j(ko,e),e&&t(Aw),e&&t(ti),e&&t(Tw),e&&t(Ut),e&&t(Sw),e&&t(ai),e&&t(Cw),e&&t(ni),e&&t(xw),e&&t(oi),j(Do),e&&t(Rw),e&&t(li),e&&t(Iw),e&&t(Ds),j(Oo),e&&t(Uw),e&&t(Mt),e&&t(Gw),e&&t(ri),e&&t(Mw),j(Ao,e),e&&t(Lw),e&&t(Lt),e&&t(Zw),e&&t(pi),e&&t(Nw),e&&t(ii),e&&t(Hw),j(To,e),e&&t(Bw),e&&t(Ge),e&&t(Ww),j(So,e),e&&t(Fw),e&&t(Zt),e&&t(Vw),j(Co,e),e&&t(Yw),e&&t(ui),e&&t(Kw),e&&t(Nt),e&&t(Jw),e&&t(ci),e&&t(Xw),e&&t(Os),j(xo),e&&t(Qw),e&&t(Bt),e&&t(ey),e&&t(Wt),e&&t(sy),j(Uo,e),e&&t(ty),e&&t(Ft),e&&t(ay),j(Go,e),e&&t(ny),e&&t(Vt),e&&t(oy),e&&t(hi),e&&t(ly),e&&t(ne),e&&t(ry),j(Lo,e),e&&t(py),e&&t(Yt),e&&t(iy),e&&t(di),e&&t(uy),e&&t(As),j(Zo),e&&t(cy),e&&t(U),e&&t(hy),e&&t(ji),e&&t(fy),j(No,e),e&&t(dy),e&&t(wi),e&&t(my),j(Ho,e),e&&t(_y),e&&t(yi),e&&t(vy),e&&t(Ts),j(Bo),e&&t(jy),j(Xt,e),e&&t(wy),e&&t(Me),e&&t(yy),e&&t(Qt),e&&t(gy),e&&t(Le),e&&t(by),e&&t($i),e&&t(qy),e&&t(ea),e&&t(Ey),e&&t(sa),e&&t($y),e&&t(oe),e&&t(ky),e&&t(Di),e&&t(Py),e&&t(Ss),j(Fo),e&&t(zy),e&&t(Vo),e&&t(Dy),e&&t(Ze),e&&t(Oy),e&&t(aa),e&&t(Ay),e&&t(Ai),e&&t(Ty),e&&t(Ti),e&&t(Sy),e&&t(Cs),j(Jo),e&&t(Cy),e&&t(Si),e&&t(xy),j(Xo,e),e&&t(Ry),e&&t(Ci),e&&t(Iy),e&&t(Ne),e&&t(Uy),e&&t(He
),e&&t(Gy),j(Qo,e),e&&t(My),e&&t(xi),e&&t(Ly),e&&t(Ri),e&&t(Zy),e&&t(Rs),j(el),e&&t(Ny),e&&t(Ii),e&&t(Hy),j(sl,e),e&&t(By),e&&t(V),e&&t(Wy),e&&t(Be),e&&t(Fy),e&&t(Ui),e&&t(Vy),e&&t(la),e&&t(Yy),e&&t(M),e&&t(Ky),e&&t(ze),e&&t(Jy),e&&t(Gi),e&&t(Xy),e&&t(We),e&&t(Qy),e&&t(Fe),e&&t(eg),e&&t(rl),e&&t(sg),e&&t(le),e&&t(tg),e&&t(Li),e&&t(ag),e&&t(De),e&&t(ng),e&&t(Ve),e&&t(og),e&&t(ra),e&&t(lg),e&&t(Zi),e&&t(rg),e&&t(Is),j(cl),e&&t(pg),e&&t(Ni),e&&t(ig),e&&t(Hi),e&&t(ug),j(hl,e),e&&t(cg),e&&t(ia),e&&t(hg),e&&t(Ye),e&&t(fg),e&&t(ua),e&&t(dg),e&&t(Ke),e&&t(mg),e&&t(Bi),e&&t(_g),e&&t(Us),j(_l),e&&t(vg),e&&t(Wi),e&&t(jg),e&&t(Fi),e&&t(wg),e&&t(ha),e&&t(yg),e&&t(Je),e&&t(gg),e&&t(Vi),e&&t(bg),e&&t(Ms),j(jl),e&&t(qg),e&&t(da),e&&t(Eg),j(wl,e),e&&t($g),e&&t(ma),e&&t(kg),j(yl,e),e&&t(Pg),e&&t(Yi),e&&t(zg),e&&t(Ls),j(gl),e&&t(Dg),e&&t(va),e&&t(Og),j(bl,e),e&&t(Ag),e&&t(ja),e&&t(Tg),j(ql,e),e&&t(Sg),e&&t(Zs),j(El),e&&t(Cg),e&&t(ya),e&&t(xg),e&&t(Ki),e&&t(Rg),e&&t(ga),e&&t(Ig),e&&t(Ji),e&&t(Ug),e&&t(Ns),j($l),e&&t(Gg),e&&t(Xe),e&&t(Mg),e&&t($),e&&t(Lg),e&&t(Qe),e&&t(Zg),j(Pl,e),e&&t(Ng),e&&t(Qi),e&&t(Hg),e&&t(re),e&&t(Bg),e&&t(eu),e&&t(Wg),e&&t(su),e&&t(Fg),j(Tl,e),e&&t(Vg),e&&t(qa),e&&t(Yg),e&&t(au),e&&t(Kg),j(Sl,e),e&&t(Jg),e&&t(pe),e&&t(Xg),e&&t(nu),e&&t(Qg),e&&t(Hs),j(Cl),e&&t(e2),e&&t(L),e&&t(s2),e&&t(ou),e&&t(t2),e&&t($a),e&&t(a2),e&&t(D),e&&t(n2),e&&t(ss),e&&t(o2),j(Il,e),e&&t(l2),e&&t(ts),e&&t(r2),e&&t(ie),e&&t(p2),e&&t(pu),e&&t(i2),j(Ul,e),e&&t(u2),e&&t(za),e&&t(c2),e&&t(Da),e&&t(h2),j(Gl,e),e&&t(f2),e&&t(Y),e&&t(d2),e&&t(uu),e&&t(m2),e&&t(Bs),j(Ml),e&&t(_2),e&&t(cu),e&&t(v2),e&&t(Aa),e&&t(j2),j(Ll,e),e&&t(w2),e&&t(Ta),e&&t(y2),e&&t(ue),e&&t(g2),e&&t(hu),e&&t(b2),e&&t(Ws),j(Nl),e&&t(q2),e&&t(fu),e&&t(E2),e&&t(Fs),j(Hl),e&&t($2),e&&t(du),e&&t(k2),j(Bl,e),e&&t(P2),e&&t(os),e&&t(z2),e&&t(ls),e&&t(D2),e&&t(_u),e&&t(O2),j(Wl,e),e&&t(A2),e&&t(xa),e&&t(T2),e&&t(Ra),e&&t(S2),e&&t(Vs),j(Vl),e&&t(C2),e&&t(ju),e&&t(x2),j(Yl,e),e&&t(R2),e&&t(wu),e&&t(I2),e&&t(rs),e&&t(U2),e&&t(yu),e&&t(G2),j(Kl,e),e&&t(M2),j(Ua,e),e&&t(L2),e&&t(Ys),j(Jl),e&&t(Z2),e&&t(gu),e&&t(N2),j(Xl,e),e&&t(H2),e&&t(ce),e&&t(B2),e&&t(Ma),e&&t(W2),e&&t(qu),e&&t(F2),j(Ql,e),e&&t(V2),e&&t(La),e&&t(Y2),e&&t(Za),e&&t(K2),e&&t($u),e&&t(J2),e&&t(Ks),j(sr),e&&t(X2),e&&t(ku),e&&t(Q2),j(tr,e),e&&t(eb),e&&t(Z),e&&t(sb),e&&t(zu),e&&t(tb),j(ar,e),e&&t(ab),e&&t(Ha),e&&t(nb),e&&t(Ou),e&&t(ob),e&&t(Js),j(nr),e&&t(lb),e&&t(Au),e&&t(rb),j(or,e),e&&t(pb),e&&t(ps),e&&t(ib),e&&t(Su),e&&t(ub),j(lr,e),e&&t(cb),e&&t(Wa),e&&t(hb),e&&t(xu),e&&t(fb),e&&t(Xs),j(rr),e&&t(db),e&&t(Ru),e&&t(mb),j(pr,e),e&&t(_b),e&&t(is),e&&t(vb),e&&t(Uu),e&&t(jb),j(ir,e),e&&t(wb),e&&t(Va),e&&t(yb),e&&t(Mu),e&&t(gb),e&&t(Qs),j(ur),e&&t(bb),e&&t(Ka),e&&t(qb),e&&t(Lu),e&&t(Eb),e&&t(Ja),e&&t($b),e&&t(O),e&&t(kb),j(cr,e),e&&t(Pb),e&&t(Zu),e&&t(zb),e&&t(Xa),e&&t(Db),e&&t(Nu),e&&t(Ob),e&&t(Hu),e&&t(Ab),e&&t(Bu),e&&t(Tb),j(fr,e),e&&t(Sb),e&&t(us),e&&t(Cb),j(dr,e),e&&t(xb),j(Qa,e),e&&t(Rb),e&&t(en),e&&t(Ib),e&&t(sn),e&&t(Ub),j(mr,e),e&&t(Gb),e&&t(Wu),e&&t(Mb),e&&t(cs),e&&t(Lb),e&&t(Fu),e&&t(Zb),j(_r,e),e&&t(Nb),e&&t(tn),e&&t(Hb),j(vr,e),e&&t(Bb),e&&t(an),e&&t(Wb),e&&t(Vu),e&&t(Fb),e&&t(jr),e&&t(Vb),e&&t(hs),e&&t(Yb),e&&t(Yu),e&&t(Kb),e&&t(et),j(wr),e&&t(Jb),e&&t(Ku),e&&t(Xb),e&&t(Ju),e&&t(Qb),e&&t(Xu),e&&t(e3),e&&t(st),j(yr),e&&t(s3),e&&t(ln),e&&t(t3),j(gr,e),e&&t(a3),e&&t(Qu),e&&t(n3),e&&t(P),e&&t(o3),j(br,e),e&&t(l3),e&&t(rn),e&&t(r3),e&&t(tc),e&&t(p3),e&&t(pn),e&&t(i3),e&&t(he),e&&t(u3),e&&t(tt),j(Er),e&&t(c3),e&&t($r),e&&t(h3),e&&t(cn),e&&t(f3),e&&t(nc),e&&t(d3),j(Pr,e),e&&t(m3),e&&t(fs),e&&t(
_3),e&&t(oc),e&&t(v3),e&&t(at),j(zr),e&&t(j3),e&&t(lc),e&&t(w3),e&&t(fn),e&&t(y3),j(Dr,e),e&&t(g3),e&&t(pc),e&&t(b3),e&&t(dn),e&&t(q3),j(Or,e),e&&t(E3),e&&t(ic),e&&t($3),e&&t(uc),e&&t(k3),e&&t(nt),j(Ar),e&&t(P3),e&&t(cc),e&&t(z3),e&&t(hc),e&&t(D3),j(Tr,e),e&&t(O3),e&&t(fc),e&&t(A3),e&&t(dc),e&&t(T3),e&&t(mc),e&&t(S3),e&&t(_c),e&&t(C3),j(Sr,e),e&&t(x3),e&&t(vc),e&&t(R3),e&&t(_n),e&&t(I3),e&&t(ot),j(xr),e&&t(U3),e&&t(jc),e&&t(G3),e&&t(wc),e&&t(M3),e&&t(N),j(Ir),e&&t(L3),e&&t(qc),e&&t(Z3),e&&t(jn),e&&t(N3),e&&t(rt),j(Hr),e&&t(H3),e&&t(pt),j(Br),e&&t(B3),e&&t(H),e&&t(W3),e&&t(it),j(Vr),e&&t(F3),e&&t($c),e&&t(V3),e&&t(kc),e&&t(Y3),j(Yr,e),e&&t(K3),e&&t(bn),e&&t(J3),j(Kr,e),e&&t(X3),e&&t(Pc),e&&t(Q3),e&&t(zc),e&&t(e0),e&&t(ds),e&&t(s0),e&&t(ut),j(Jr),e&&t(t0),e&&t(ms),e&&t(a0),e&&t(ft),j(sp),e&&t(n0),e&&t(fe),e&&t(o0),e&&t($n),e&&t(l0),e&&t(_s),e&&t(r0),e&&t(kn),e&&t(p0),e&&t(Rc),e&&t(i0),j(tp,e),e&&t(u0),e&&t(Ic),e&&t(c0),j(ap,e),e&&t(h0),e&&t(de),e&&t(f0),e&&t(dt),j(op),e&&t(d0),e&&t(ee),j(lp),e&&t(m0),e&&t(mt),j(rp),e&&t(_0),e&&t(Dn),e&&t(v0),e&&t(Zc),e&&t(j0),e&&t(Nc),e&&t(w0),e&&t(Hc),e&&t(y0),e&&t(On),e&&t(g0),j(pp,e),e&&t(b0),e&&t(An),e&&t(q0),j(ip,e),e&&t(E0),e&&t(Bc),e&&t($0),e&&t(_t),j(up),e&&t(k0),e&&t(_e),e&&t(P0),e&&t(Wc),e&&t(z0),e&&t(vs),e&&t(D0),e&&t(js)}}}const RY={local:"deepspeed-integration",sections:[{local:"trainer-deepspeed-integration",sections:[{local:"installation",title:"Installation"},{local:"deployment-with-multiple-gpus",title:"Deployment with multiple GPUs"},{local:"deployment-with-one-gpu",title:"Deployment with one GPU"},{local:"deployment-in-notebooks",title:"Deployment in Notebooks"},{local:"configuration",title:"Configuration"},{local:"passing-configuration",title:"Passing Configuration"},{local:"shared-configuration",title:"Shared Configuration"},{local:"zero",sections:[{local:"zero2-config",title:"ZeRO-2 Config"},{local:"zero3-config",title:"ZeRO-3 Config"}],title:"ZeRO"},{local:"nvme-support",sections:[{local:"zero2-vs-zero3-performance",title:"ZeRO-2 vs ZeRO-3 Performance"},{local:"zero2-example",title:"ZeRO-2 Example"},{local:"zero3-example",title:"ZeRO-3 Example"}],title:"NVMe Support"},{local:"optimizer-and-scheduler",sections:[{local:"optimizer",title:"Optimizer"},{local:"scheduler",title:"Scheduler"}],title:"Optimizer and Scheduler"},{local:"fp32-precision",title:"fp32 Precision"},{local:"automatic-mixed-precision",title:"Automatic Mixed Precision"},{local:"fp16",title:"fp16"},{local:"bf16",title:"bf16"},{local:"apex",title:"apex"},{local:"batch-size",title:"Batch Size"},{local:"gradient-accumulation",title:"Gradient Accumulation"},{local:"gradient-clipping",title:"Gradient Clipping"},{local:"getting-the-model-weights-out",title:"Getting The Model Weights Out"},{local:"zero3-and-infinity-nuances",sections:[{local:"constructing-massive-models",title:"Constructing Massive Models"},{local:"gathering-parameters",title:"Gathering Parameters"}],title:"ZeRO-3 and Infinity Nuances"},{local:"zero-inference",title:"ZeRO Inference"},{local:"memory-requirements",title:"Memory Requirements"},{local:"filing-issues",title:"Filing Issues"},{local:"troubleshooting",sections:[{local:"the-deepspeed-process-gets-killed-at-startup-without-a-traceback",title:"the `deepspeed` process gets killed at startup without a traceback"},{local:"training-andor-evalpredict-loss-is-nan",title:"training and/or eval/predict loss is `NaN`"}],title:"Troubleshooting"},{local:"notes",title:"Notes"}],title:"Trainer Deepspeed 
Integration"},{local:"nontrainer-deepspeed-integration",title:"Non-Trainer Deepspeed Integration"},{local:"transformers.deepspeed.HfDeepSpeedConfig",sections:[{local:"custom-deepspeed-zero-inference",title:"Custom DeepSpeed ZeRO Inference"}],title:"HfDeepSpeedConfig"},{local:"main-deepspeed-resources",title:"Main DeepSpeed Resources"}],title:"DeepSpeed Integration"};function IY(jt){return OY(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class NY extends kY{constructor(g){super();PY(this,g,IY,xY,zY,{})}}export{NY as default,RY as metadata};
23
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/data_collator.mdx-hf-doc-builder.js
import{S as ad,i as od,s as rd,e as o,k as i,w as h,t as l,M as nd,c as r,d as t,m as d,a as n,x as g,h as s,b as c,G as e,g as p,y as u,q as _,o as v,B as b,v as ld}from"../../chunks/vendor-hf-doc-builder.js";import{T as td}from"../../chunks/Tip-hf-doc-builder.js";import{D as T}from"../../chunks/Docstring-hf-doc-builder.js";import{I as $e}from"../../chunks/IconCopyLink-hf-doc-builder.js";function sd(wt){let f,W,k,P,I,$,H,O,S,V,E,F,D,R;return{c(){f=o("p"),W=l(`For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the `),k=o("code"),P=l('"special_tokens_mask"'),I=l(" key, as returned by a "),$=o("a"),H=l("PreTrainedTokenizer"),O=l(` or a `),S=o("a"),V=l("PreTrainedTokenizerFast"),E=l(" with the argument "),F=o("code"),D=l("return_special_tokens_mask=True"),R=l("."),this.h()},l(j){f=r(j,"P",{});var y=n(f);W=s(y,`For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the `),k=r(y,"CODE",{});var Ct=n(k);P=s(Ct,'"special_tokens_mask"'),Ct.forEach(t),I=s(y," key, as returned by a "),$=r(y,"A",{href:!0});var Tt=n($);H=s(Tt,"PreTrainedTokenizer"),Tt.forEach(t),O=s(y,` or a `),S=r(y,"A",{href:!0});var We=n(S);V=s(We,"PreTrainedTokenizerFast"),We.forEach(t),E=s(y," with the argument "),F=r(y,"CODE",{});var U=n(F);D=s(U,"return_special_tokens_mask=True"),U.forEach(t),R=s(y,"."),y.forEach(t),this.h()},h(){c($,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),c(S,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast")},m(j,y){p(j,f,y),e(f,W),e(f,k),e(k,P),e(f,I),e(f,$),e($,H),e(f,O),e(f,S),e(S,V),e(f,E),e(f,F),e(F,D),e(f,R)},d(j){j&&t(f)}}}function id(wt){let f,W,k,P,I,$,H,O,S,V,E;return{c(){f=o("p"),W=l("This collator relies on details of the implementation of subword tokenization by "),k=o("a"),P=l("BertTokenizer"),I=l(`, specifically that subword tokens are prefixed with `),$=o("em"),H=l("##"),O=l(`. For tokenizers that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to `),S=o("code"),V=l(".DataCollatorForLanguageModeling"),E=l("."),this.h()},l(F){f=r(F,"P",{});var D=n(f);W=s(D,"This collator relies on details of the implementation of subword tokenization by "),k=r(D,"A",{href:!0});var R=n(k);P=s(R,"BertTokenizer"),R.forEach(t),I=s(D,`, specifically that subword tokens are prefixed with `),$=r(D,"EM",{});var j=n($);H=s(j,"##"),j.forEach(t),O=s(D,`. 
For tokenizers that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to `),S=r(D,"CODE",{});var y=n(S);V=s(y,".DataCollatorForLanguageModeling"),y.forEach(t),E=s(D,"."),D.forEach(t),this.h()},h(){c(k,"href","/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer")},m(F,D){p(F,f,D),e(f,W),e(f,k),e(k,P),e(f,I),e(f,$),e($,H),e(f,O),e(f,S),e(S,V),e(f,E)},d(F){F&&t(f)}}}function dd(wt){let f,W,k,P,I,$,H,O,S,V,E,F,D,R,j,y,Ct,Tt,We,U,rr,Pt,nr,lr,po,X,sr,St,ir,dr,Ft,cr,mr,fo,ne,De,Rt,Ve,pr,Xt,fr,ho,N,je,hr,Gt,gr,ur,Be,Lt,Jt,_r,vr,br,zt,Qt,kr,yr,$r,Yt,Dr,go,le,xe,Zt,Ke,xr,ea,Er,uo,L,He,wr,ta,Cr,Tr,Ue,At,aa,Pr,Sr,Fr,qt,oa,Lr,zr,Ar,ra,qr,Mr,na,Ir,_o,se,Ee,la,Re,Or,sa,Nr,vo,ie,Xe,Wr,ia,Vr,bo,de,we,da,Ge,jr,ca,Br,ko,ce,Je,Kr,ma,Hr,yo,me,Ce,pa,Qe,Ur,fa,Rr,$o,pe,Ye,Xr,ha,Gr,Do,fe,Te,ga,Ze,Jr,ua,Qr,xo,w,et,Yr,_a,Zr,en,Pe,tn,Se,tt,an,va,on,rn,Fe,at,nn,ba,ln,sn,Le,ot,dn,ka,cn,Eo,he,ze,ya,rt,mn,$a,pn,wo,x,nt,fn,Da,hn,gn,lt,xa,un,_n,Ea,vn,bn,Ae,kn,qe,st,yn,wa,$n,Dn,Me,it,xn,Ca,En,wn,Ie,dt,Cn,Ta,Tn,Co,ge,Oe,Pa,ct,Pn,Sa,Sn,To,C,mt,Fn,Fa,Ln,zn,pt,La,An,qn,za,Mn,In,G,ft,On,Aa,Nn,Wn,z,ht,Vn,qa,jn,Bn,Kn,ue,Hn,Ma,Un,Rn,Ia,Xn,Gn,Jn,gt,Qn,Oa,Yn,Zn,el,J,tl,Na,al,ol,Wa,rl,nl,Va,ll,sl,_e,il,ja,dl,cl,Ba,ml,pl,fl,Q,ut,hl,Ka,gl,ul,A,_t,_l,Ha,vl,bl,kl,ve,yl,Ua,$l,Dl,Ra,xl,El,wl,vt,Cl,Xa,Tl,Pl,Sl,Y,Fl,Ga,Ll,zl,Ja,Al,ql,Qa,Ml,Il,be,Ol,Ya,Nl,Wl,Za,Vl,jl,Bl,Z,bt,Kl,eo,Hl,Ul,q,kt,Rl,to,Xl,Gl,Jl,ke,Ql,ao,Yl,Zl,oo,es,ts,as,yt,os,ro,rs,ns,ls,ee,ss,no,is,ds,lo,cs,ms,so,ps,fs,ye,hs,io,gs,us,co,_s,vs,Po;return $=new $e({}),Ve=new $e({}),je=new T({props:{name:"transformers.default_data_collator",anchor:"transformers.default_data_collator",parameters:[{name:"features",val:": typing.List[InputDataClass]"},{name:"return_tensors",val:" = 'pt'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L49"}}),Ke=new $e({}),He=new T({props:{name:"class transformers.DefaultDataCollator",anchor:"transformers.DefaultDataCollator",parameters:[{name:"return_tensors",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.DefaultDataCollator.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L75"}}),Re=new $e({}),Xe=new T({props:{name:"class transformers.DataCollatorWithPadding",anchor:"transformers.DataCollatorWithPadding",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.DataCollatorWithPadding.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.DataCollatorWithPadding.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code> (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code>: No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.DataCollatorWithPadding.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.DataCollatorWithPadding.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.DataCollatorWithPadding.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L213"}}),Ge=new $e({}),Je=new T({props:{name:"class transformers.DataCollatorForTokenClassification",anchor:"transformers.DataCollatorForTokenClassification",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"label_pad_token_id",val:": int = -100"},{name:"return_tensors",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.DataCollatorForTokenClassification.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.DataCollatorForTokenClassification.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.DataCollatorForTokenClassification.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.DataCollatorForTokenClassification.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.DataCollatorForTokenClassification.label_pad_token_id",description:`<strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).`,name:"label_pad_token_id"},{anchor:"transformers.DataCollatorForTokenClassification.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L264"}}),Qe=new $e({}),Ye=new T({props:{name:"class transformers.DataCollatorForSeq2Seq",anchor:"transformers.DataCollatorForSeq2Seq",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"model",val:": typing.Optional[typing.Any] = None"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"label_pad_token_id",val:": int = -100"},{name:"return_tensors",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.DataCollatorForSeq2Seq.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.DataCollatorForSeq2Seq.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; The model that is being trained. If set and has the <em>prepare_decoder_input_ids_from_labels</em>, use it to prepare the <em>decoder_input_ids</em></p> <p>This is useful when using <em>label_smoothing</em> to avoid calculating loss twice.`,name:"model"},{anchor:"transformers.DataCollatorForSeq2Seq.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.DataCollatorForSeq2Seq.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.DataCollatorForSeq2Seq.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.DataCollatorForSeq2Seq.label_pad_token_id",description:`<strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignored 
by PyTorch loss functions).`,name:"label_pad_token_id"},{anchor:"transformers.DataCollatorForSeq2Seq.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L514"}}),Ze=new $e({}),et=new T({props:{name:"class transformers.DataCollatorForLanguageModeling",anchor:"transformers.DataCollatorForLanguageModeling",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"mlm",val:": bool = True"},{name:"mlm_probability",val:": float = 0.15"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"tf_experimental_compile",val:": bool = False"},{name:"return_tensors",val:": str = 'pt'"}],parametersDescription:[{anchor:"transformers.DataCollatorForLanguageModeling.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.DataCollatorForLanguageModeling.mlm",description:`<strong>mlm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use masked language modeling. If set to <code>False</code>, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token.`,name:"mlm"},{anchor:"transformers.DataCollatorForLanguageModeling.mlm_probability",description:`<strong>mlm_probability</strong> (<code>float</code>, <em>optional</em>, defaults to 0.15) &#x2014; The probability with which to (randomly) mask tokens in the input, when <code>mlm</code> is set to <code>True</code>.`,name:"mlm_probability"},{anchor:"transformers.DataCollatorForLanguageModeling.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.`,name:"pad_to_multiple_of"},{anchor:"transformers.DataCollatorForLanguageModeling.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L607"}}),Pe=new td({props:{$$slots:{default:[sd]},$$scope:{ctx:wt}}}),tt=new T({props:{name:"numpy_mask_tokens",anchor:"transformers.DataCollatorForLanguageModeling.numpy_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"special_tokens_mask",val:": typing.Optional[typing.Any] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L805"}}),at=new T({props:{name:"tf_mask_tokens",anchor:"transformers.DataCollatorForLanguageModeling.tf_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"vocab_size",val:""},{name:"mask_token_id",val:""},{name:"special_tokens_mask",val:": typing.Optional[typing.Any] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L659"}}),ot=new T({props:{name:"torch_mask_tokens",anchor:"transformers.DataCollatorForLanguageModeling.torch_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"special_tokens_mask",val:": typing.Optional[typing.Any] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L748"}}),rt=new $e({}),nt=new T({props:{name:"class transformers.DataCollatorForWholeWordMask",anchor:"transformers.DataCollatorForWholeWordMask",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"mlm",val:": bool = True"},{name:"mlm_probability",val:": float = 0.15"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"tf_experimental_compile",val:": bool = False"},{name:"return_tensors",val:": str = 'pt'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L846"}}),Ae=new td({props:{$$slots:{default:[id]},$$scope:{ctx:wt}}}),st=new T({props:{name:"numpy_mask_tokens",anchor:"transformers.DataCollatorForWholeWordMask.numpy_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"mask_labels",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1074"}}),it=new T({props:{name:"tf_mask_tokens",anchor:"transformers.DataCollatorForWholeWordMask.tf_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"mask_labels",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1032"}}),dt=new T({props:{name:"torch_mask_tokens",anchor:"transformers.DataCollatorForWholeWordMask.torch_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"},{name:"mask_labels",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L992"}}),ct=new $e({}),mt=new T({props:{name:"class transformers.DataCollatorForPermutationLanguageModeling",anchor:"transformers.DataCollatorForPermutationLanguageModeling",parameters:[{name:"tokenizer",val:": PreTrainedTokenizerBase"},{name:"plm_probability",val:": float = 0.16666666666666666"},{name:"max_span_length",val:": int = 5"},{name:"return_tensors",val:": str = 'pt'"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1201"}}),ft=new 
T({props:{name:"numpy_mask_tokens",anchor:"transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1444"}}),ut=new T({props:{name:"tf_mask_tokens",anchor:"transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1334"}}),bt=new T({props:{name:"torch_mask_tokens",anchor:"transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens",parameters:[{name:"inputs",val:": typing.Any"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1235"}}),{c(){f=o("meta"),W=i(),k=o("h1"),P=o("a"),I=o("span"),h($.$$.fragment),H=i(),O=o("span"),S=l("Data Collator"),V=i(),E=o("p"),F=l(`Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of the same type as the elements of `),D=o("code"),R=l("train_dataset"),j=l(" or "),y=o("code"),Ct=l("eval_dataset"),Tt=l("."),We=i(),U=o("p"),rr=l(`To be able to build batches, data collators may apply some processing (like padding). Some of them (like `),Pt=o("a"),nr=l("DataCollatorForLanguageModeling"),lr=l(`) also apply some random data augmentation (like random masking) on the formed batch.`),po=i(),X=o("p"),sr=l("Examples of use can be found in the "),St=o("a"),ir=l("example scripts"),dr=l(" or "),Ft=o("a"),cr=l("example notebooks"),mr=l("."),fo=i(),ne=o("h2"),De=o("a"),Rt=o("span"),h(Ve.$$.fragment),pr=i(),Xt=o("span"),fr=l("Default data collator"),ho=i(),N=o("div"),h(je.$$.fragment),hr=i(),Gt=o("p"),gr=l(`Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:`),ur=i(),Be=o("ul"),Lt=o("li"),Jt=o("code"),_r=l("label"),vr=l(": handles a single value (int or float) per object"),br=i(),zt=o("li"),Qt=o("code"),kr=l("label_ids"),yr=l(": handles a list of values per object"),$r=i(),Yt=o("p"),Dr=l(`Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it\u2019s useful.`),go=i(),le=o("h2"),xe=o("a"),Zt=o("span"),h(Ke.$$.fragment),xr=i(),ea=o("span"),Er=l("DefaultDataCollator"),uo=i(),L=o("div"),h(He.$$.fragment),wr=i(),ta=o("p"),Cr=l(`Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:`),Tr=i(),Ue=o("ul"),At=o("li"),aa=o("code"),Pr=l("label"),Sr=l(": handles a single value (int or float) per object"),Fr=i(),qt=o("li"),oa=o("code"),Lr=l("label_ids"),zr=l(": handles a list of values per object"),Ar=i(),ra=o("p"),qr=l(`Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it\u2019s useful.`),Mr=i(),na=o("p"),Ir=l(`This is an object (like other data collators) rather than a pure function like default_data_collator. 
This can be helpful if you need to set a return_tensors value at initialization.`),_o=i(),se=o("h2"),Ee=o("a"),la=o("span"),h(Re.$$.fragment),Or=i(),sa=o("span"),Nr=l("DataCollatorWithPadding"),vo=i(),ie=o("div"),h(Xe.$$.fragment),Wr=i(),ia=o("p"),Vr=l("Data collator that will dynamically pad the inputs received."),bo=i(),de=o("h2"),we=o("a"),da=o("span"),h(Ge.$$.fragment),jr=i(),ca=o("span"),Br=l("DataCollatorForTokenClassification"),ko=i(),ce=o("div"),h(Je.$$.fragment),Kr=i(),ma=o("p"),Hr=l("Data collator that will dynamically pad the inputs received, as well as the labels."),yo=i(),me=o("h2"),Ce=o("a"),pa=o("span"),h(Qe.$$.fragment),Ur=i(),fa=o("span"),Rr=l("DataCollatorForSeq2Seq"),$o=i(),pe=o("div"),h(Ye.$$.fragment),Xr=i(),ha=o("p"),Gr=l("Data collator that will dynamically pad the inputs received, as well as the labels."),Do=i(),fe=o("h2"),Te=o("a"),ga=o("span"),h(Ze.$$.fragment),Jr=i(),ua=o("span"),Qr=l("DataCollatorForLanguageModeling"),xo=i(),w=o("div"),h(et.$$.fragment),Yr=i(),_a=o("p"),Zr=l(`Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length.`),en=i(),h(Pe.$$.fragment),tn=i(),Se=o("div"),h(tt.$$.fragment),an=i(),va=o("p"),on=l("Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),rn=i(),Fe=o("div"),h(at.$$.fragment),nn=i(),ba=o("p"),ln=l("Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),sn=i(),Le=o("div"),h(ot.$$.fragment),dn=i(),ka=o("p"),cn=l("Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),Eo=i(),he=o("h2"),ze=o("a"),ya=o("span"),h(rt.$$.fragment),mn=i(),$a=o("span"),pn=l("DataCollatorForWholeWordMask"),wo=i(),x=o("div"),h(nt.$$.fragment),fn=i(),Da=o("p"),hn=l("Data collator used for language modeling that masks entire words."),gn=i(),lt=o("ul"),xa=o("li"),un=l("collates batches of tensors, honoring their tokenizer\u2019s pad_token"),_n=i(),Ea=o("li"),vn=l("preprocesses batches for masked language modeling"),bn=i(),h(Ae.$$.fragment),kn=i(),qe=o("div"),h(st.$$.fragment),yn=i(),wa=o("p"),$n=l(`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),Dn=i(),Me=o("div"),h(it.$$.fragment),xn=i(),Ca=o("p"),En=l(`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),wn=i(),Ie=o("div"),h(dt.$$.fragment),Cn=i(),Ta=o("p"),Tn=l(`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),Co=i(),ge=o("h2"),Oe=o("a"),Pa=o("span"),h(ct.$$.fragment),Pn=i(),Sa=o("span"),Sn=l("DataCollatorForPermutationLanguageModeling"),To=i(),C=o("div"),h(mt.$$.fragment),Fn=i(),Fa=o("p"),Ln=l("Data collator used for permutation language modeling."),zn=i(),pt=o("ul"),La=o("li"),An=l("collates batches of tensors, honoring their tokenizer\u2019s pad_token"),qn=i(),za=o("li"),Mn=l("preprocesses batches for permutation language modeling with procedures specific to XLNet"),In=i(),G=o("div"),h(ft.$$.fragment),On=i(),Aa=o("p"),Nn=l("The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),Wn=i(),z=o("ol"),ht=o("li"),Vn=l("Start from the beginning of the sequence by setting "),qa=o("code"),jn=l("cur_len = 0"),Bn=l(" (number of tokens processed so far)."),Kn=i(),ue=o("li"),Hn=l("Sample a "),Ma=o("code"),Un=l("span_length"),Rn=l(" from the interval "),Ia=o("code"),Xn=l("[1, max_span_length]"),Gn=l(" (length of span of tokens to be masked)"),Jn=i(),gt=o("li"),Qn=l("Reserve a context of length "),Oa=o("code"),Yn=l("context_length = span_length / plm_probability"),Zn=l(` to surround span to be masked`),el=i(),J=o("li"),tl=l("Sample a starting point "),Na=o("code"),al=l("start_index"),ol=l(" from the interval "),Wa=o("code"),rl=l("[cur_len, cur_len + context_length - span_length]"),nl=l(" and mask tokens "),Va=o("code"),ll=l("start_index:start_index + span_length"),sl=i(),_e=o("li"),il=l("Set "),ja=o("code"),dl=l("cur_len = cur_len + context_length"),cl=l(". If "),Ba=o("code"),ml=l("cur_len < max_len"),pl=l(` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.`),fl=i(),Q=o("div"),h(ut.$$.fragment),hl=i(),Ka=o("p"),gl=l("The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),ul=i(),A=o("ol"),_t=o("li"),_l=l("Start from the beginning of the sequence by setting "),Ha=o("code"),vl=l("cur_len = 0"),bl=l(" (number of tokens processed so far)."),kl=i(),ve=o("li"),yl=l("Sample a "),Ua=o("code"),$l=l("span_length"),Dl=l(" from the interval "),Ra=o("code"),xl=l("[1, max_span_length]"),El=l(" (length of span of tokens to be masked)"),wl=i(),vt=o("li"),Cl=l("Reserve a context of length "),Xa=o("code"),Tl=l("context_length = span_length / plm_probability"),Pl=l(` to surround span to be masked`),Sl=i(),Y=o("li"),Fl=l("Sample a starting point "),Ga=o("code"),Ll=l("start_index"),zl=l(" from the interval "),Ja=o("code"),Al=l("[cur_len, cur_len + context_length - span_length]"),ql=l(" and mask tokens "),Qa=o("code"),Ml=l("start_index:start_index + span_length"),Il=i(),be=o("li"),Ol=l("Set "),Ya=o("code"),Nl=l("cur_len = cur_len + context_length"),Wl=l(". If "),Za=o("code"),Vl=l("cur_len < max_len"),jl=l(` (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.`),Bl=i(),Z=o("div"),h(bt.$$.fragment),Kl=i(),eo=o("p"),Hl=l("The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),Ul=i(),q=o("ol"),kt=o("li"),Rl=l("Start from the beginning of the sequence by setting "),to=o("code"),Xl=l("cur_len = 0"),Gl=l(" (number of tokens processed so far)."),Jl=i(),ke=o("li"),Ql=l("Sample a "),ao=o("code"),Yl=l("span_length"),Zl=l(" from the interval "),oo=o("code"),es=l("[1, max_span_length]"),ts=l(" (length of span of tokens to be masked)"),as=i(),yt=o("li"),os=l("Reserve a context of length "),ro=o("code"),rs=l("context_length = span_length / plm_probability"),ns=l(` to surround span to be masked`),ls=i(),ee=o("li"),ss=l("Sample a starting point "),no=o("code"),is=l("start_index"),ds=l(" from the interval "),lo=o("code"),cs=l("[cur_len, cur_len + context_length - span_length]"),ms=l(" and mask tokens "),so=o("code"),ps=l("start_index:start_index + span_length"),fs=i(),ye=o("li"),hs=l("Set "),io=o("code"),gs=l("cur_len = cur_len + context_length"),us=l(". If "),co=o("code"),_s=l("cur_len < max_len"),vs=l(` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.`),this.h()},l(a){const m=nd('[data-svelte="svelte-1phssyn"]',document.head);f=r(m,"META",{name:!0,content:!0}),m.forEach(t),W=d(a),k=r(a,"H1",{class:!0});var $t=n(k);P=r($t,"A",{id:!0,class:!0,href:!0});var mo=n(P);I=r(mo,"SPAN",{});var Ds=n(I);g($.$$.fragment,Ds),Ds.forEach(t),mo.forEach(t),H=d($t),O=r($t,"SPAN",{});var xs=n(O);S=s(xs,"Data Collator"),xs.forEach(t),$t.forEach(t),V=d(a),E=r(a,"P",{});var Mt=n(E);F=s(Mt,`Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of the same type as the elements of `),D=r(Mt,"CODE",{});var Es=n(D);R=s(Es,"train_dataset"),Es.forEach(t),j=s(Mt," or "),y=r(Mt,"CODE",{});var ws=n(y);Ct=s(ws,"eval_dataset"),ws.forEach(t),Tt=s(Mt,"."),Mt.forEach(t),We=d(a),U=r(a,"P",{});var So=n(U);rr=s(So,`To be able to build batches, data collators may apply some processing (like padding). 
Some of them (like `),Pt=r(So,"A",{href:!0});var Cs=n(Pt);nr=s(Cs,"DataCollatorForLanguageModeling"),Cs.forEach(t),lr=s(So,`) also apply some random data augmentation (like random masking) on the formed batch.`),So.forEach(t),po=d(a),X=r(a,"P",{});var It=n(X);sr=s(It,"Examples of use can be found in the "),St=r(It,"A",{href:!0});var Ts=n(St);ir=s(Ts,"example scripts"),Ts.forEach(t),dr=s(It," or "),Ft=r(It,"A",{href:!0});var Ps=n(Ft);cr=s(Ps,"example notebooks"),Ps.forEach(t),mr=s(It,"."),It.forEach(t),fo=d(a),ne=r(a,"H2",{class:!0});var Fo=n(ne);De=r(Fo,"A",{id:!0,class:!0,href:!0});var Ss=n(De);Rt=r(Ss,"SPAN",{});var Fs=n(Rt);g(Ve.$$.fragment,Fs),Fs.forEach(t),Ss.forEach(t),pr=d(Fo),Xt=r(Fo,"SPAN",{});var Ls=n(Xt);fr=s(Ls,"Default data collator"),Ls.forEach(t),Fo.forEach(t),ho=d(a),N=r(a,"DIV",{class:!0});var Ne=n(N);g(je.$$.fragment,Ne),hr=d(Ne),Gt=r(Ne,"P",{});var zs=n(Gt);gr=s(zs,`Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:`),zs.forEach(t),ur=d(Ne),Be=r(Ne,"UL",{});var Lo=n(Be);Lt=r(Lo,"LI",{});var bs=n(Lt);Jt=r(bs,"CODE",{});var As=n(Jt);_r=s(As,"label"),As.forEach(t),vr=s(bs,": handles a single value (int or float) per object"),bs.forEach(t),br=d(Lo),zt=r(Lo,"LI",{});var ks=n(zt);Qt=r(ks,"CODE",{});var qs=n(Qt);kr=s(qs,"label_ids"),qs.forEach(t),yr=s(ks,": handles a list of values per object"),ks.forEach(t),Lo.forEach(t),$r=d(Ne),Yt=r(Ne,"P",{});var Ms=n(Yt);Dr=s(Ms,`Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it\u2019s useful.`),Ms.forEach(t),Ne.forEach(t),go=d(a),le=r(a,"H2",{class:!0});var zo=n(le);xe=r(zo,"A",{id:!0,class:!0,href:!0});var Is=n(xe);Zt=r(Is,"SPAN",{});var Os=n(Zt);g(Ke.$$.fragment,Os),Os.forEach(t),Is.forEach(t),xr=d(zo),ea=r(zo,"SPAN",{});var Ns=n(ea);Er=s(Ns,"DefaultDataCollator"),Ns.forEach(t),zo.forEach(t),uo=d(a),L=r(a,"DIV",{class:!0});var te=n(L);g(He.$$.fragment,te),wr=d(te),ta=r(te,"P",{});var Ws=n(ta);Cr=s(Ws,`Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:`),Ws.forEach(t),Tr=d(te),Ue=r(te,"UL",{});var Ao=n(Ue);At=r(Ao,"LI",{});var ys=n(At);aa=r(ys,"CODE",{});var Vs=n(aa);Pr=s(Vs,"label"),Vs.forEach(t),Sr=s(ys,": handles a single value (int or float) per object"),ys.forEach(t),Fr=d(Ao),qt=r(Ao,"LI",{});var $s=n(qt);oa=r($s,"CODE",{});var js=n(oa);Lr=s(js,"label_ids"),js.forEach(t),zr=s($s,": handles a list of values per object"),$s.forEach(t),Ao.forEach(t),Ar=d(te),ra=r(te,"P",{});var Bs=n(ra);qr=s(Bs,`Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it\u2019s useful.`),Bs.forEach(t),Mr=d(te),na=r(te,"P",{});var Ks=n(na);Ir=s(Ks,`This is an object (like other data collators) rather than a pure function like default_data_collator. 
This can be helpful if you need to set a return_tensors value at initialization.`),Ks.forEach(t),te.forEach(t),_o=d(a),se=r(a,"H2",{class:!0});var qo=n(se);Ee=r(qo,"A",{id:!0,class:!0,href:!0});var Hs=n(Ee);la=r(Hs,"SPAN",{});var Us=n(la);g(Re.$$.fragment,Us),Us.forEach(t),Hs.forEach(t),Or=d(qo),sa=r(qo,"SPAN",{});var Rs=n(sa);Nr=s(Rs,"DataCollatorWithPadding"),Rs.forEach(t),qo.forEach(t),vo=d(a),ie=r(a,"DIV",{class:!0});var Mo=n(ie);g(Xe.$$.fragment,Mo),Wr=d(Mo),ia=r(Mo,"P",{});var Xs=n(ia);Vr=s(Xs,"Data collator that will dynamically pad the inputs received."),Xs.forEach(t),Mo.forEach(t),bo=d(a),de=r(a,"H2",{class:!0});var Io=n(de);we=r(Io,"A",{id:!0,class:!0,href:!0});var Gs=n(we);da=r(Gs,"SPAN",{});var Js=n(da);g(Ge.$$.fragment,Js),Js.forEach(t),Gs.forEach(t),jr=d(Io),ca=r(Io,"SPAN",{});var Qs=n(ca);Br=s(Qs,"DataCollatorForTokenClassification"),Qs.forEach(t),Io.forEach(t),ko=d(a),ce=r(a,"DIV",{class:!0});var Oo=n(ce);g(Je.$$.fragment,Oo),Kr=d(Oo),ma=r(Oo,"P",{});var Ys=n(ma);Hr=s(Ys,"Data collator that will dynamically pad the inputs received, as well as the labels."),Ys.forEach(t),Oo.forEach(t),yo=d(a),me=r(a,"H2",{class:!0});var No=n(me);Ce=r(No,"A",{id:!0,class:!0,href:!0});var Zs=n(Ce);pa=r(Zs,"SPAN",{});var ei=n(pa);g(Qe.$$.fragment,ei),ei.forEach(t),Zs.forEach(t),Ur=d(No),fa=r(No,"SPAN",{});var ti=n(fa);Rr=s(ti,"DataCollatorForSeq2Seq"),ti.forEach(t),No.forEach(t),$o=d(a),pe=r(a,"DIV",{class:!0});var Wo=n(pe);g(Ye.$$.fragment,Wo),Xr=d(Wo),ha=r(Wo,"P",{});var ai=n(ha);Gr=s(ai,"Data collator that will dynamically pad the inputs received, as well as the labels."),ai.forEach(t),Wo.forEach(t),Do=d(a),fe=r(a,"H2",{class:!0});var Vo=n(fe);Te=r(Vo,"A",{id:!0,class:!0,href:!0});var oi=n(Te);ga=r(oi,"SPAN",{});var ri=n(ga);g(Ze.$$.fragment,ri),ri.forEach(t),oi.forEach(t),Jr=d(Vo),ua=r(Vo,"SPAN",{});var ni=n(ua);Qr=s(ni,"DataCollatorForLanguageModeling"),ni.forEach(t),Vo.forEach(t),xo=d(a),w=r(a,"DIV",{class:!0});var B=n(w);g(et.$$.fragment,B),Yr=d(B),_a=r(B,"P",{});var li=n(_a);Zr=s(li,`Data collator used for language modeling. 
Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length.`),li.forEach(t),en=d(B),g(Pe.$$.fragment,B),tn=d(B),Se=r(B,"DIV",{class:!0});var jo=n(Se);g(tt.$$.fragment,jo),an=d(jo),va=r(jo,"P",{});var si=n(va);on=s(si,"Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),si.forEach(t),jo.forEach(t),rn=d(B),Fe=r(B,"DIV",{class:!0});var Bo=n(Fe);g(at.$$.fragment,Bo),nn=d(Bo),ba=r(Bo,"P",{});var ii=n(ba);ln=s(ii,"Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),ii.forEach(t),Bo.forEach(t),sn=d(B),Le=r(B,"DIV",{class:!0});var Ko=n(Le);g(ot.$$.fragment,Ko),dn=d(Ko),ka=r(Ko,"P",{});var di=n(ka);cn=s(di,"Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."),di.forEach(t),Ko.forEach(t),B.forEach(t),Eo=d(a),he=r(a,"H2",{class:!0});var Ho=n(he);ze=r(Ho,"A",{id:!0,class:!0,href:!0});var ci=n(ze);ya=r(ci,"SPAN",{});var mi=n(ya);g(rt.$$.fragment,mi),mi.forEach(t),ci.forEach(t),mn=d(Ho),$a=r(Ho,"SPAN",{});var pi=n($a);pn=s(pi,"DataCollatorForWholeWordMask"),pi.forEach(t),Ho.forEach(t),wo=d(a),x=r(a,"DIV",{class:!0});var M=n(x);g(nt.$$.fragment,M),fn=d(M),Da=r(M,"P",{});var fi=n(Da);hn=s(fi,"Data collator used for language modeling that masks entire words."),fi.forEach(t),gn=d(M),lt=r(M,"UL",{});var Uo=n(lt);xa=r(Uo,"LI",{});var hi=n(xa);un=s(hi,"collates batches of tensors, honoring their tokenizer\u2019s pad_token"),hi.forEach(t),_n=d(Uo),Ea=r(Uo,"LI",{});var gi=n(Ea);vn=s(gi,"preprocesses batches for masked language modeling"),gi.forEach(t),Uo.forEach(t),bn=d(M),g(Ae.$$.fragment,M),kn=d(M),qe=r(M,"DIV",{class:!0});var Ro=n(qe);g(st.$$.fragment,Ro),yn=d(Ro),wa=r(Ro,"P",{});var ui=n(wa);$n=s(ui,`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),ui.forEach(t),Ro.forEach(t),Dn=d(M),Me=r(M,"DIV",{class:!0});var Xo=n(Me);g(it.$$.fragment,Xo),xn=d(Xo),Ca=r(Xo,"P",{});var _i=n(Ca);En=s(_i,`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),_i.forEach(t),Xo.forEach(t),wn=d(M),Ie=r(M,"DIV",{class:!0});var Go=n(Ie);g(dt.$$.fragment,Go),Cn=d(Go),Ta=r(Go,"P",{});var vi=n(Ta);Tn=s(vi,`Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set \u2018mask_labels\u2019 means we use whole word mask (wwm), we directly mask idxs according to it\u2019s ref.`),vi.forEach(t),Go.forEach(t),M.forEach(t),Co=d(a),ge=r(a,"H2",{class:!0});var Jo=n(ge);Oe=r(Jo,"A",{id:!0,class:!0,href:!0});var bi=n(Oe);Pa=r(bi,"SPAN",{});var ki=n(Pa);g(ct.$$.fragment,ki),ki.forEach(t),bi.forEach(t),Pn=d(Jo),Sa=r(Jo,"SPAN",{});var yi=n(Sa);Sn=s(yi,"DataCollatorForPermutationLanguageModeling"),yi.forEach(t),Jo.forEach(t),To=d(a),C=r(a,"DIV",{class:!0});var K=n(C);g(mt.$$.fragment,K),Fn=d(K),Fa=r(K,"P",{});var $i=n(Fa);Ln=s($i,"Data collator used for permutation language modeling."),$i.forEach(t),zn=d(K),pt=r(K,"UL",{});var Qo=n(pt);La=r(Qo,"LI",{});var Di=n(La);An=s(Di,"collates batches of tensors, honoring their tokenizer\u2019s pad_token"),Di.forEach(t),qn=d(Qo),za=r(Qo,"LI",{});var xi=n(za);Mn=s(xi,"preprocesses batches for permutation language modeling with procedures specific to XLNet"),xi.forEach(t),Qo.forEach(t),In=d(K),G=r(K,"DIV",{class:!0});var Ot=n(G);g(ft.$$.fragment,Ot),On=d(Ot),Aa=r(Ot,"P",{});var Ei=n(Aa);Nn=s(Ei,"The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),Ei.forEach(t),Wn=d(Ot),z=r(Ot,"OL",{start:!0});var ae=n(z);ht=r(ae,"LI",{});var Yo=n(ht);Vn=s(Yo,"Start from the beginning of the sequence by setting "),qa=r(Yo,"CODE",{});var wi=n(qa);jn=s(wi,"cur_len = 0"),wi.forEach(t),Bn=s(Yo," (number of tokens processed so far)."),Yo.forEach(t),Kn=d(ae),ue=r(ae,"LI",{});var Nt=n(ue);Hn=s(Nt,"Sample a "),Ma=r(Nt,"CODE",{});var Ci=n(Ma);Un=s(Ci,"span_length"),Ci.forEach(t),Rn=s(Nt," from the interval "),Ia=r(Nt,"CODE",{});var Ti=n(Ia);Xn=s(Ti,"[1, max_span_length]"),Ti.forEach(t),Gn=s(Nt," (length of span of tokens to be masked)"),Nt.forEach(t),Jn=d(ae),gt=r(ae,"LI",{});var Zo=n(gt);Qn=s(Zo,"Reserve a context of length "),Oa=r(Zo,"CODE",{});var Pi=n(Oa);Yn=s(Pi,"context_length = span_length / plm_probability"),Pi.forEach(t),Zn=s(Zo,` to surround span to be masked`),Zo.forEach(t),el=d(ae),J=r(ae,"LI",{});var Dt=n(J);tl=s(Dt,"Sample a starting point "),Na=r(Dt,"CODE",{});var Si=n(Na);al=s(Si,"start_index"),Si.forEach(t),ol=s(Dt," from the interval "),Wa=r(Dt,"CODE",{});var Fi=n(Wa);rl=s(Fi,"[cur_len, cur_len + context_length - span_length]"),Fi.forEach(t),nl=s(Dt," and mask tokens "),Va=r(Dt,"CODE",{});var Li=n(Va);ll=s(Li,"start_index:start_index + span_length"),Li.forEach(t),Dt.forEach(t),sl=d(ae),_e=r(ae,"LI",{});var Wt=n(_e);il=s(Wt,"Set "),ja=r(Wt,"CODE",{});var zi=n(ja);dl=s(zi,"cur_len = cur_len + context_length"),zi.forEach(t),cl=s(Wt,". If "),Ba=r(Wt,"CODE",{});var Ai=n(Ba);ml=s(Ai,"cur_len < max_len"),Ai.forEach(t),pl=s(Wt,` (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.`),Wt.forEach(t),ae.forEach(t),Ot.forEach(t),fl=d(K),Q=r(K,"DIV",{class:!0});var Vt=n(Q);g(ut.$$.fragment,Vt),hl=d(Vt),Ka=r(Vt,"P",{});var qi=n(Ka);gl=s(qi,"The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),qi.forEach(t),ul=d(Vt),A=r(Vt,"OL",{start:!0});var oe=n(A);_t=r(oe,"LI",{});var er=n(_t);_l=s(er,"Start from the beginning of the sequence by setting "),Ha=r(er,"CODE",{});var Mi=n(Ha);vl=s(Mi,"cur_len = 0"),Mi.forEach(t),bl=s(er," (number of tokens processed so far)."),er.forEach(t),kl=d(oe),ve=r(oe,"LI",{});var jt=n(ve);yl=s(jt,"Sample a "),Ua=r(jt,"CODE",{});var Ii=n(Ua);$l=s(Ii,"span_length"),Ii.forEach(t),Dl=s(jt," from the interval "),Ra=r(jt,"CODE",{});var Oi=n(Ra);xl=s(Oi,"[1, max_span_length]"),Oi.forEach(t),El=s(jt," (length of span of tokens to be masked)"),jt.forEach(t),wl=d(oe),vt=r(oe,"LI",{});var tr=n(vt);Cl=s(tr,"Reserve a context of length "),Xa=r(tr,"CODE",{});var Ni=n(Xa);Tl=s(Ni,"context_length = span_length / plm_probability"),Ni.forEach(t),Pl=s(tr,` to surround span to be masked`),tr.forEach(t),Sl=d(oe),Y=r(oe,"LI",{});var xt=n(Y);Fl=s(xt,"Sample a starting point "),Ga=r(xt,"CODE",{});var Wi=n(Ga);Ll=s(Wi,"start_index"),Wi.forEach(t),zl=s(xt," from the interval "),Ja=r(xt,"CODE",{});var Vi=n(Ja);Al=s(Vi,"[cur_len, cur_len + context_length - span_length]"),Vi.forEach(t),ql=s(xt," and mask tokens "),Qa=r(xt,"CODE",{});var ji=n(Qa);Ml=s(ji,"start_index:start_index + span_length"),ji.forEach(t),xt.forEach(t),Il=d(oe),be=r(oe,"LI",{});var Bt=n(be);Ol=s(Bt,"Set "),Ya=r(Bt,"CODE",{});var Bi=n(Ya);Nl=s(Bi,"cur_len = cur_len + context_length"),Bi.forEach(t),Wl=s(Bt,". If "),Za=r(Bt,"CODE",{});var Ki=n(Za);Vl=s(Ki,"cur_len < max_len"),Ki.forEach(t),jl=s(Bt,` (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.`),Bt.forEach(t),oe.forEach(t),Vt.forEach(t),Bl=d(K),Z=r(K,"DIV",{class:!0});var Kt=n(Z);g(bt.$$.fragment,Kt),Kl=d(Kt),eo=r(Kt,"P",{});var Hi=n(eo);Hl=s(Hi,"The masked tokens to be predicted for a particular sequence are determined by the following algorithm:"),Hi.forEach(t),Ul=d(Kt),q=r(Kt,"OL",{start:!0});var re=n(q);kt=r(re,"LI",{});var ar=n(kt);Rl=s(ar,"Start from the beginning of the sequence by setting "),to=r(ar,"CODE",{});var Ui=n(to);Xl=s(Ui,"cur_len = 0"),Ui.forEach(t),Gl=s(ar," (number of tokens processed so far)."),ar.forEach(t),Jl=d(re),ke=r(re,"LI",{});var Ht=n(ke);Ql=s(Ht,"Sample a "),ao=r(Ht,"CODE",{});var Ri=n(ao);Yl=s(Ri,"span_length"),Ri.forEach(t),Zl=s(Ht," from the interval "),oo=r(Ht,"CODE",{});var Xi=n(oo);es=s(Xi,"[1, max_span_length]"),Xi.forEach(t),ts=s(Ht," (length of span of tokens to be masked)"),Ht.forEach(t),as=d(re),yt=r(re,"LI",{});var or=n(yt);os=s(or,"Reserve a context of length "),ro=r(or,"CODE",{});var Gi=n(ro);rs=s(Gi,"context_length = span_length / plm_probability"),Gi.forEach(t),ns=s(or,` to surround span to be masked`),or.forEach(t),ls=d(re),ee=r(re,"LI",{});var Et=n(ee);ss=s(Et,"Sample a starting point "),no=r(Et,"CODE",{});var Ji=n(no);is=s(Ji,"start_index"),Ji.forEach(t),ds=s(Et," from the interval "),lo=r(Et,"CODE",{});var Qi=n(lo);cs=s(Qi,"[cur_len, cur_len + context_length - span_length]"),Qi.forEach(t),ms=s(Et," and mask tokens "),so=r(Et,"CODE",{});var Yi=n(so);ps=s(Yi,"start_index:start_index + span_length"),Yi.forEach(t),Et.forEach(t),fs=d(re),ye=r(re,"LI",{});var Ut=n(ye);hs=s(Ut,"Set "),io=r(Ut,"CODE",{});var Zi=n(io);gs=s(Zi,"cur_len = cur_len + context_length"),Zi.forEach(t),us=s(Ut,". If "),co=r(Ut,"CODE",{});var ed=n(co);_s=s(ed,"cur_len < max_len"),ed.forEach(t),vs=s(Ut,` (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.`),Ut.forEach(t),re.forEach(t),Kt.forEach(t),K.forEach(t),this.h()},h(){c(f,"name","hf:doc:metadata"),c(f,"content",JSON.stringify(cd)),c(P,"id","data-collator"),c(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(P,"href","#data-collator"),c(k,"class","relative group"),c(Pt,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling"),c(St,"href","../examples"),c(Ft,"href","../notebooks"),c(De,"id","transformers.default_data_collator"),c(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(De,"href","#transformers.default_data_collator"),c(ne,"class","relative group"),c(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(xe,"id","transformers.DefaultDataCollator"),c(xe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xe,"href","#transformers.DefaultDataCollator"),c(le,"class","relative group"),c(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ee,"id","transformers.DataCollatorWithPadding"),c(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ee,"href","#transformers.DataCollatorWithPadding"),c(se,"class","relative group"),c(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(we,"id","transformers.DataCollatorForTokenClassification"),c(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(we,"href","#transformers.DataCollatorForTokenClassification"),c(de,"class","relative group"),c(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ce,"id","transformers.DataCollatorForSeq2Seq"),c(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ce,"href","#transformers.DataCollatorForSeq2Seq"),c(me,"class","relative group"),c(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Te,"id","transformers.DataCollatorForLanguageModeling"),c(Te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Te,"href","#transformers.DataCollatorForLanguageModeling"),c(fe,"class","relative group"),c(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(ze,"id","transformers.DataCollatorForWholeWordMask"),c(ze,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ze,"href","#transformers.DataCollatorForWholeWordMask"),c(he,"class","relative group"),c(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(Oe,"id","transformers.DataCollatorForPermutationLanguageModeling"),c(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Oe,"href","#transformers.DataCollatorForPermutationLanguageModeling"),c(ge,"class","relative group"),c(z,"start","0"),c(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(A,"start","0"),c(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(q,"start","0"),c(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),c(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(a,m){e(document.head,f),p(a,W,m),p(a,k,m),e(k,P),e(P,I),u($,I,null),e(k,H),e(k,O),e(O,S),p(a,V,m),p(a,E,m),e(E,F),e(E,D),e(D,R),e(E,j),e(E,y),e(y,Ct),e(E,Tt),p(a,We,m),p(a,U,m),e(U,rr),e(U,Pt),e(Pt,nr),e(U,lr),p(a,po,m),p(a,X,m),e(X,sr),e(X,St),e(St,ir),e(X,dr),e(X,Ft),e(Ft,cr),e(X,mr),p(a,fo,m),p(a,ne,m),e(ne,De),e(De,Rt),u(Ve,Rt,null),e(ne,pr),e(ne,Xt),e(Xt,fr),p(a,ho,m),p(a,N,m),u(je,N,null),e(N,hr),e(N,Gt),e(Gt,gr),e(N,ur),e(N,Be),e(Be,Lt),e(Lt,Jt),e(Jt,_r),e(Lt,vr),e(Be,br),e(Be,zt),e(zt,Qt),e(Qt,kr),e(zt,yr),e(N,$r),e(N,Yt),e(Yt,Dr),p(a,go,m),p(a,le,m),e(le,xe),e(xe,Zt),u(Ke,Zt,null),e(le,xr),e(le,ea),e(ea,Er),p(a,uo,m),p(a,L,m),u(He,L,null),e(L,wr),e(L,ta),e(ta,Cr),e(L,Tr),e(L,Ue),e(Ue,At),e(At,aa),e(aa,Pr),e(At,Sr),e(Ue,Fr),e(Ue,qt),e(qt,oa),e(oa,Lr),e(qt,zr),e(L,Ar),e(L,ra),e(ra,qr),e(L,Mr),e(L,na),e(na,Ir),p(a,_o,m),p(a,se,m),e(se,Ee),e(Ee,la),u(Re,la,null),e(se,Or),e(se,sa),e(sa,Nr),p(a,vo,m),p(a,ie,m),u(Xe,ie,null),e(ie,Wr),e(ie,ia),e(ia,Vr),p(a,bo,m),p(a,de,m),e(de,we),e(we,da),u(Ge,da,null),e(de,jr),e(de,ca),e(ca,Br),p(a,ko,m),p(a,ce,m),u(Je,ce,null),e(ce,Kr),e(ce,ma),e(ma,Hr),p(a,yo,m),p(a,me,m),e(me,Ce),e(Ce,pa),u(Qe,pa,null),e(me,Ur),e(me,fa),e(fa,Rr),p(a,$o,m),p(a,pe,m),u(Ye,pe,null),e(pe,Xr),e(pe,ha),e(ha,Gr),p(a,Do,m),p(a,fe,m),e(fe,Te),e(Te,ga),u(Ze,ga,null),e(fe,Jr),e(fe,ua),e(ua,Qr),p(a,xo,m),p(a,w,m),u(et,w,null),e(w,Yr),e(w,_a),e(_a,Zr),e(w,en),u(Pe,w,null),e(w,tn),e(w,Se),u(tt,Se,null),e(Se,an),e(Se,va),e(va,on),e(w,rn),e(w,Fe),u(at,Fe,null),e(Fe,nn),e(Fe,ba),e(ba,ln),e(w,sn),e(w,Le),u(ot,Le,null),e(Le,dn),e(Le,ka),e(ka,cn),p(a,Eo,m),p(a,he,m),e(he,ze),e(ze,ya),u(rt,ya,null),e(he,mn),e(he,$a),e($a,pn),p(a,wo,m),p(a,x,m),u(nt,x,null),e(x,fn),e(x,Da),e(Da,hn),e(x,gn),e(x,lt),e(lt,xa),e(xa,un),e(lt,_n),e(lt,Ea),e(Ea,vn),e(x,bn),u(Ae,x,null),e(x,kn),e(x,qe),u(st,qe,null),e(qe,yn),e(qe,wa),e(wa,$n),e(x,Dn),e(x,Me),u(it,Me,null),e(Me,xn),e(Me,Ca),e(Ca,En),e(x,wn),e(x,Ie),u(dt,Ie,null),e(Ie,Cn),e(Ie,Ta),e(Ta,Tn),p(a,Co,m),p(a,ge,m),e(ge,Oe),e(Oe,Pa),u(ct,Pa,null),e(ge,Pn),e(ge,Sa),e(Sa,Sn),p(a,To,m),p(a,C,m),u(mt,C,null),e(C,Fn),e(C,Fa),e(Fa,Ln),e(C,zn),e(C,pt),e(pt,La
),e(La,An),e(pt,qn),e(pt,za),e(za,Mn),e(C,In),e(C,G),u(ft,G,null),e(G,On),e(G,Aa),e(Aa,Nn),e(G,Wn),e(G,z),e(z,ht),e(ht,Vn),e(ht,qa),e(qa,jn),e(ht,Bn),e(z,Kn),e(z,ue),e(ue,Hn),e(ue,Ma),e(Ma,Un),e(ue,Rn),e(ue,Ia),e(Ia,Xn),e(ue,Gn),e(z,Jn),e(z,gt),e(gt,Qn),e(gt,Oa),e(Oa,Yn),e(gt,Zn),e(z,el),e(z,J),e(J,tl),e(J,Na),e(Na,al),e(J,ol),e(J,Wa),e(Wa,rl),e(J,nl),e(J,Va),e(Va,ll),e(z,sl),e(z,_e),e(_e,il),e(_e,ja),e(ja,dl),e(_e,cl),e(_e,Ba),e(Ba,ml),e(_e,pl),e(C,fl),e(C,Q),u(ut,Q,null),e(Q,hl),e(Q,Ka),e(Ka,gl),e(Q,ul),e(Q,A),e(A,_t),e(_t,_l),e(_t,Ha),e(Ha,vl),e(_t,bl),e(A,kl),e(A,ve),e(ve,yl),e(ve,Ua),e(Ua,$l),e(ve,Dl),e(ve,Ra),e(Ra,xl),e(ve,El),e(A,wl),e(A,vt),e(vt,Cl),e(vt,Xa),e(Xa,Tl),e(vt,Pl),e(A,Sl),e(A,Y),e(Y,Fl),e(Y,Ga),e(Ga,Ll),e(Y,zl),e(Y,Ja),e(Ja,Al),e(Y,ql),e(Y,Qa),e(Qa,Ml),e(A,Il),e(A,be),e(be,Ol),e(be,Ya),e(Ya,Nl),e(be,Wl),e(be,Za),e(Za,Vl),e(be,jl),e(C,Bl),e(C,Z),u(bt,Z,null),e(Z,Kl),e(Z,eo),e(eo,Hl),e(Z,Ul),e(Z,q),e(q,kt),e(kt,Rl),e(kt,to),e(to,Xl),e(kt,Gl),e(q,Jl),e(q,ke),e(ke,Ql),e(ke,ao),e(ao,Yl),e(ke,Zl),e(ke,oo),e(oo,es),e(ke,ts),e(q,as),e(q,yt),e(yt,os),e(yt,ro),e(ro,rs),e(yt,ns),e(q,ls),e(q,ee),e(ee,ss),e(ee,no),e(no,is),e(ee,ds),e(ee,lo),e(lo,cs),e(ee,ms),e(ee,so),e(so,ps),e(q,fs),e(q,ye),e(ye,hs),e(ye,io),e(io,gs),e(ye,us),e(ye,co),e(co,_s),e(ye,vs),Po=!0},p(a,[m]){const $t={};m&2&&($t.$$scope={dirty:m,ctx:a}),Pe.$set($t);const mo={};m&2&&(mo.$$scope={dirty:m,ctx:a}),Ae.$set(mo)},i(a){Po||(_($.$$.fragment,a),_(Ve.$$.fragment,a),_(je.$$.fragment,a),_(Ke.$$.fragment,a),_(He.$$.fragment,a),_(Re.$$.fragment,a),_(Xe.$$.fragment,a),_(Ge.$$.fragment,a),_(Je.$$.fragment,a),_(Qe.$$.fragment,a),_(Ye.$$.fragment,a),_(Ze.$$.fragment,a),_(et.$$.fragment,a),_(Pe.$$.fragment,a),_(tt.$$.fragment,a),_(at.$$.fragment,a),_(ot.$$.fragment,a),_(rt.$$.fragment,a),_(nt.$$.fragment,a),_(Ae.$$.fragment,a),_(st.$$.fragment,a),_(it.$$.fragment,a),_(dt.$$.fragment,a),_(ct.$$.fragment,a),_(mt.$$.fragment,a),_(ft.$$.fragment,a),_(ut.$$.fragment,a),_(bt.$$.fragment,a),Po=!0)},o(a){v($.$$.fragment,a),v(Ve.$$.fragment,a),v(je.$$.fragment,a),v(Ke.$$.fragment,a),v(He.$$.fragment,a),v(Re.$$.fragment,a),v(Xe.$$.fragment,a),v(Ge.$$.fragment,a),v(Je.$$.fragment,a),v(Qe.$$.fragment,a),v(Ye.$$.fragment,a),v(Ze.$$.fragment,a),v(et.$$.fragment,a),v(Pe.$$.fragment,a),v(tt.$$.fragment,a),v(at.$$.fragment,a),v(ot.$$.fragment,a),v(rt.$$.fragment,a),v(nt.$$.fragment,a),v(Ae.$$.fragment,a),v(st.$$.fragment,a),v(it.$$.fragment,a),v(dt.$$.fragment,a),v(ct.$$.fragment,a),v(mt.$$.fragment,a),v(ft.$$.fragment,a),v(ut.$$.fragment,a),v(bt.$$.fragment,a),Po=!1},d(a){t(f),a&&t(W),a&&t(k),b($),a&&t(V),a&&t(E),a&&t(We),a&&t(U),a&&t(po),a&&t(X),a&&t(fo),a&&t(ne),b(Ve),a&&t(ho),a&&t(N),b(je),a&&t(go),a&&t(le),b(Ke),a&&t(uo),a&&t(L),b(He),a&&t(_o),a&&t(se),b(Re),a&&t(vo),a&&t(ie),b(Xe),a&&t(bo),a&&t(de),b(Ge),a&&t(ko),a&&t(ce),b(Je),a&&t(yo),a&&t(me),b(Qe),a&&t($o),a&&t(pe),b(Ye),a&&t(Do),a&&t(fe),b(Ze),a&&t(xo),a&&t(w),b(et),b(Pe),b(tt),b(at),b(ot),a&&t(Eo),a&&t(he),b(rt),a&&t(wo),a&&t(x),b(nt),b(Ae),b(st),b(it),b(dt),a&&t(Co),a&&t(ge),b(ct),a&&t(To),a&&t(C),b(mt),b(ft),b(ut),b(bt)}}}const cd={local:"data-collator",sections:[{local:"transformers.default_data_collator",title:"Default data 
collator"},{local:"transformers.DefaultDataCollator",title:"DefaultDataCollator"},{local:"transformers.DataCollatorWithPadding",title:"DataCollatorWithPadding"},{local:"transformers.DataCollatorForTokenClassification",title:"DataCollatorForTokenClassification"},{local:"transformers.DataCollatorForSeq2Seq",title:"DataCollatorForSeq2Seq"},{local:"transformers.DataCollatorForLanguageModeling",title:"DataCollatorForLanguageModeling"},{local:"transformers.DataCollatorForWholeWordMask",title:"DataCollatorForWholeWordMask"},{local:"transformers.DataCollatorForPermutationLanguageModeling",title:"DataCollatorForPermutationLanguageModeling"}],title:"Data Collator"};function md(wt){return ld(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ud extends ad{constructor(f){super();od(this,f,md,dd,rd,{})}}export{ud as default,cd as metadata};
24
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/main_classes/feature_extractor.mdx-hf-doc-builder.js
import{S as Bn,i as Cn,s as An,e as a,k as c,w as h,t as s,M as Vn,c as o,d as r,m as d,a as n,x as g,h as i,b as l,G as e,g as w,y as _,q as v,o as x,B as y,v as jn,L as On}from"../../chunks/vendor-hf-doc-builder.js";import{T as Sn}from"../../chunks/Tip-hf-doc-builder.js";import{D as I}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Wn}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Kt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Rn}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Un(Q){let m,k,b,$,T;return{c(){m=a("p"),k=s("Passing "),b=a("code"),$=s("use_auth_token=True"),T=s(" is required when you want to use a private model.")},l(p){m=o(p,"P",{});var F=n(m);k=i(F,"Passing "),b=o(F,"CODE",{});var q=n(b);$=i(q,"use_auth_token=True"),q.forEach(r),T=i(F," is required when you want to use a private model."),F.forEach(r)},m(p,F){w(p,m,F),e(m,k),e(m,b),e(b,$),e(m,T)},d(p){p&&r(m)}}}function Hn(Q){let m,k,b,$,T;return $=new Wn({props:{code:`# We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a # derived class: *Wav2Vec2FeatureExtractor* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h" ) # Download feature_extraction_config from huggingface.co and cache. feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "./test/saved_model/" ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False ) assert feature_extractor.return_attention_mask is False feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True ) assert feature_extractor.return_attention_mask is False assert unused_kwargs == {"foo": False}`,highlighted:`<span class="hljs-comment"># We can&#x27;t instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: *Wav2Vec2FeatureExtractor*</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span> ) <span class="hljs-comment"># Download feature_extraction_config from huggingface.co and cache.</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;./test/saved_model/&quot;</span> ) <span class="hljs-comment"># E.g. 
feature_extractor (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/preprocessor_config.json&quot;</span>) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, return_attention_mask=<span class="hljs-literal">False</span>, foo=<span class="hljs-literal">False</span> ) <span class="hljs-keyword">assert</span> feature_extractor.return_attention_mask <span class="hljs-keyword">is</span> <span class="hljs-literal">False</span> feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, return_attention_mask=<span class="hljs-literal">False</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span> ) <span class="hljs-keyword">assert</span> feature_extractor.return_attention_mask <span class="hljs-keyword">is</span> <span class="hljs-literal">False</span> <span class="hljs-keyword">assert</span> unused_kwargs == {<span class="hljs-string">&quot;foo&quot;</span>: <span class="hljs-literal">False</span>}`}}),{c(){m=a("p"),k=s("Examples:"),b=c(),h($.$$.fragment)},l(p){m=o(p,"P",{});var F=n(m);k=i(F,"Examples:"),F.forEach(r),b=d(p),g($.$$.fragment,p)},m(p,F){w(p,m,F),e(m,k),w(p,b,F),_($,p,F),T=!0},p:On,i(p){T||(v($.$$.fragment,p),T=!0)},o(p){x($.$$.fragment,p),T=!1},d(p){p&&r(m),p&&r(b),y($,p)}}}function Gn(Q){let m,k,b,$,T,p,F,q;return{c(){m=a("p"),k=s("If the "),b=a("code"),$=s("processed_features"),T=s(` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),p=a("code"),F=s("return_tensors"),q=s(`. In the case of PyTorch tensors, you will lose the specific device of your tensors however.`)},l(j){m=o(j,"P",{});var z=n(m);k=i(z,"If the "),b=o(z,"CODE",{});var D=n(b);$=i(D,"processed_features"),D.forEach(r),T=i(z,` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),p=o(z,"CODE",{});var Ze=n(p);F=i(Ze,"return_tensors"),Ze.forEach(r),q=i(z,`. 
In the case of PyTorch tensors, you will lose the specific device of your tensors however.`),z.forEach(r)},m(j,z){w(j,m,z),e(m,k),e(m,b),e(b,$),e(m,T),e(m,p),e(p,F),e(m,q)},d(j){j&&r(m)}}}function Jn(Q){let m,k,b,$,T,p,F,q,j,z,D,Ze,mt,zr,Dr,pt,Mr,Lr,Qt,O,X,ut,$e,qr,ft,Nr,Xt,M,Ee,Sr,ht,Br,Cr,N,we,Ar,B,Vr,et,jr,Or,gt,Wr,Rr,tt,Ur,Hr,Gr,Z,Jr,ee,Yr,te,Fe,Kr,W,Qr,_t,Xr,Zr,rt,ea,ta,Zt,R,re,vt,Ie,ra,xt,aa,er,C,Te,oa,yt,na,sa,S,ke,ia,bt,ca,da,U,la,$t,ma,pa,Et,ua,fa,ha,ae,tr,H,oe,wt,Pe,ga,Ft,_a,rr,P,ze,va,G,xa,at,ya,ba,It,$a,Ea,wa,Tt,Fa,Ia,ne,De,Ta,kt,ka,Pa,se,Me,za,Le,Da,Pt,Ma,La,ar,J,ie,zt,qe,qa,Dt,Na,or,f,Ne,Sa,Mt,Ba,Ca,ce,Se,Aa,Be,Va,Lt,ja,Oa,Wa,de,Ce,Ra,Ae,Ua,qt,Ha,Ga,Ja,le,Ve,Ya,je,Ka,Nt,Qa,Xa,Za,me,Oe,eo,Y,to,St,ro,ao,Bt,oo,no,so,pe,We,io,L,co,Ct,lo,mo,At,po,uo,Vt,fo,ho,jt,go,_o,vo,ue,Re,xo,Ot,yo,bo,fe,Ue,$o,He,Eo,Wt,wo,Fo,Io,he,Ge,To,K,ko,Rt,Po,zo,Ut,Do,Mo,Lo,ge,Je,qo,Ye,No,Ht,So,Bo,Co,_e,Ke,Ao,Qe,Vo,Gt,jo,Oo,nr;return p=new Kt({}),$e=new Kt({}),Ee=new I({props:{name:"class transformers.FeatureExtractionMixin",anchor:"transformers.FeatureExtractionMixin",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L198"}}),we=new I({props:{name:"from_pretrained",anchor:"transformers.FeatureExtractionMixin.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FeatureExtractionMixin.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. 
Attempts to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.return_unused_kwargs",description:`<strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final feature extractor object. If <code>True</code>, then this functions returns a <code>Tuple(feature_extractor, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>feature_extractor</code> and is otherwise ignored.`,name:"return_unused_kwargs"},{anchor:"transformers.FeatureExtractionMixin.from_pretrained.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. 
Behavior concerning key/value pairs whose keys are <em>not</em> feature extractor attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L222",returnDescription:` <p>A feature extractor of type <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin" >FeatureExtractionMixin</a>.</p> `}}),Z=new Sn({props:{$$slots:{default:[Un]},$$scope:{ctx:Q}}}),ee=new Rn({props:{anchor:"transformers.FeatureExtractionMixin.from_pretrained.example",$$slots:{default:[Hn]},$$scope:{ctx:Q}}}),Fe=new I({props:{name:"save_pretrained",anchor:"transformers.FeatureExtractionMixin.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FeatureExtractionMixin.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file will be saved (will be created if it does not exist).`,name:"save_directory"},{anchor:"transformers.FeatureExtractionMixin.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L306"}}),Ie=new Kt({}),Te=new I({props:{name:"class transformers.SequenceFeatureExtractor",anchor:"transformers.SequenceFeatureExtractor",parameters:[{name:"feature_size",val:": int"},{name:"sampling_rate",val:": int"},{name:"padding_value",val:": float"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.SequenceFeatureExtractor.feature_size",description:`<strong>feature_size</strong> (<code>int</code>) &#x2014; The feature dimension of the extracted features.`,name:"feature_size"},{anchor:"transformers.SequenceFeatureExtractor.sampling_rate",description:`<strong>sampling_rate</strong> (<code>int</code>) &#x2014; The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).`,name:"sampling_rate"},{anchor:"transformers.SequenceFeatureExtractor.padding_value",description:`<strong>padding_value</strong> (<code>float</code>) &#x2014; The value that is used to fill the padding values / vectors.`,name:"padding_value"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_sequence_utils.py#L30"}}),ke=new I({props:{name:"pad",anchor:"transformers.SequenceFeatureExtractor.pad",parameters:[{name:"processed_features",val:": typing.Union[transformers.feature_extraction_utils.BatchFeature, typing.List[transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, typing.List[transformers.feature_extraction_utils.BatchFeature]], 
typing.List[typing.Dict[str, transformers.feature_extraction_utils.BatchFeature]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"truncation",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"}],parametersDescription:[{anchor:"transformers.SequenceFeatureExtractor.pad.processed_features",description:`<strong>processed_features</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, list of <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <code>Dict[str, List[float]]</code>, <code>Dict[str, List[List[float]]</code> or <code>List[Dict[str, List[float]]]</code>) &#x2014; Processed inputs. Can represent one input (<a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a> or <code>Dict[str, List[float]]</code>) or a batch of input values / vectors (list of <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <em>Dict[str, List[List[float]]]</em> or <em>List[Dict[str, List[float]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[float]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.`,name:"processed_features"},{anchor:"transformers.SequenceFeatureExtractor.pad.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.SequenceFeatureExtractor.pad.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.SequenceFeatureExtractor.pad.truncation",description:`<strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to cut input sequences longer than <code>max_length</code> to <code>max_length</code>.`,name:"truncation"},{anchor:"transformers.SequenceFeatureExtractor.pad.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of 
Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p> </blockquote>`,name:"pad_to_multiple_of"},{anchor:"transformers.SequenceFeatureExtractor.pad.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.SequenceFeatureExtractor.pad.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_sequence_utils.py#L53"}}),ae=new Sn({props:{$$slots:{default:[Gn]},$$scope:{ctx:Q}}}),Pe=new Kt({}),ze=new I({props:{name:"class transformers.BatchFeature",anchor:"transformers.BatchFeature",parameters:[{name:"data",val:": typing.Union[typing.Dict[str, typing.Any], NoneType] = None"},{name:"tensor_type",val:": typing.Union[NoneType, str, transformers.utils.generic.TensorType] = None"}],parametersDescription:[{anchor:"transformers.BatchFeature.data",description:`<strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the <strong>call</strong>/pad methods (&#x2018;input_values&#x2019;, &#x2018;attention_mask&#x2019;, etc.).`,name:"data"},{anchor:"transformers.BatchFeature.tensor_type",description:`<strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.`,name:"tensor_type"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L56"}}),De=new I({props:{name:"convert_to_tensors",anchor:"transformers.BatchFeature.convert_to_tensors",parameters:[{name:"tensor_type",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"}],parametersDescription:[{anchor:"transformers.BatchFeature.convert_to_tensors.tensor_type",description:`<strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>. 
If <code>None</code>, no modification is done.`,name:"tensor_type"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L110"}}),Me=new I({props:{name:"to",anchor:"transformers.BatchFeature.to",parameters:[{name:"device",val:": typing.Union[str, ForwardRef('torch.device')]"}],parametersDescription:[{anchor:"transformers.BatchFeature.to.device",description:"<strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.",name:"device"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L175",returnDescription:` <p>The same instance after modification.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> `}}),qe=new Kt({}),Ne=new I({props:{name:"class transformers.ImageFeatureExtractionMixin",anchor:"transformers.ImageFeatureExtractionMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L78"}}),Se=new I({props:{name:"center_crop",anchor:"transformers.ImageFeatureExtractionMixin.center_crop",parameters:[{name:"image",val:""},{name:"size",val:""}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.center_crop.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code> of shape (n_channels, height, width) or (height, width, n_channels)) &#x2014; The image to resize.`,name:"image"},{anchor:"transformers.ImageFeatureExtractionMixin.center_crop.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to which crop the image.`,name:"size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L304",returnDescription:` <p>A center cropped <code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code> of shape: (n_channels, height, width).</p> `,returnType:` <p>new_image</p> `}}),Ce=new I({props:{name:"convert_rgb",anchor:"transformers.ImageFeatureExtractionMixin.convert_rgb",parameters:[{name:"image",val:""}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.convert_rgb.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code>) &#x2014; The image to convert.`,name:"image"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L120"}}),Ve=new I({props:{name:"expand_dims",anchor:"transformers.ImageFeatureExtractionMixin.expand_dims",parameters:[{name:"image",val:""}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.expand_dims.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to expand.`,name:"image"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L173"}}),Oe=new I({props:{name:"flip_channel_order",anchor:"transformers.ImageFeatureExtractionMixin.flip_channel_order",parameters:[{name:"image",val:""}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.flip_channel_order.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image whose color channels to flip. 
If <code>np.ndarray</code> or <code>torch.Tensor</code>, the channel dimension should be first.`,name:"image"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L379"}}),We=new I({props:{name:"normalize",anchor:"transformers.ImageFeatureExtractionMixin.normalize",parameters:[{name:"image",val:""},{name:"mean",val:""},{name:"std",val:""},{name:"rescale",val:" = False"}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.normalize.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to normalize.`,name:"image"},{anchor:"transformers.ImageFeatureExtractionMixin.normalize.mean",description:`<strong>mean</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The mean (per channel) to use for normalization.`,name:"mean"},{anchor:"transformers.ImageFeatureExtractionMixin.normalize.std",description:`<strong>std</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The standard deviation (per channel) to use for normalization.`,name:"std"},{anchor:"transformers.ImageFeatureExtractionMixin.normalize.rescale",description:`<strong>rescale</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will happen automatically.`,name:"rescale"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L193"}}),Re=new I({props:{name:"rescale",anchor:"transformers.ImageFeatureExtractionMixin.rescale",parameters:[{name:"image",val:": ndarray"},{name:"scale",val:": typing.Union[float, int]"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L134"}}),Ue=new I({props:{name:"resize",anchor:"transformers.ImageFeatureExtractionMixin.resize",parameters:[{name:"image",val:""},{name:"size",val:""},{name:"resample",val:" = <Resampling.BILINEAR: 2>"},{name:"default_to_square",val:" = True"},{name:"max_size",val:" = None"}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.resize.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.`,name:"image"},{anchor:"transformers.ImageFeatureExtractionMixin.resize.size",description:`<strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to use for resizing the image. If <code>size</code> is a sequence like (h, w), output size will be matched to this.</p> <p>If <code>size</code> is an int and <code>default_to_square</code> is <code>True</code>, then image will be resized to (size, size). If <code>size</code> is an int and <code>default_to_square</code> is <code>False</code>, then smaller edge of the image will be matched to this number. 
i.e, if height &gt; width, then image will be rescaled to (size * height / width, size).`,name:"size"},{anchor:"transformers.ImageFeatureExtractionMixin.resize.resample",description:`<strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; The filter to user for resampling.`,name:"resample"},{anchor:"transformers.ImageFeatureExtractionMixin.resize.default_to_square",description:`<strong>default_to_square</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; How to convert <code>size</code> when it is a single int. If set to <code>True</code>, the <code>size</code> will be converted to a square (<code>size</code>,<code>size</code>). If set to <code>False</code>, will replicate <a href="https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize" rel="nofollow"><code>torchvision.transforms.Resize</code></a> with support for resizing only the smallest edge and providing an optional <code>max_size</code>.`,name:"default_to_square"},{anchor:"transformers.ImageFeatureExtractionMixin.resize.max_size",description:`<strong>max_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than <code>max_size</code> after being resized according to <code>size</code>, then the image is resized again so that the longer edge is equal to <code>max_size</code>. As a result, <code>size</code> might be overruled, i.e the smaller edge may be shorter than <code>size</code>. Only used if <code>default_to_square</code> is <code>False</code>.`,name:"max_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L239",returnDescription:` <p>A resized <code>PIL.Image.Image</code>.</p> `,returnType:` <p>image</p> `}}),Ge=new I({props:{name:"rotate",anchor:"transformers.ImageFeatureExtractionMixin.rotate",parameters:[{name:"image",val:""},{name:"angle",val:""},{name:"resample",val:" = 0"},{name:"expand",val:" = 0"},{name:"center",val:" = None"},{name:"translate",val:" = None"},{name:"fillcolor",val:" = None"}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.rotate.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to rotate. If <code>np.ndarray</code> or <code>torch.Tensor</code>, will be converted to <code>PIL.Image.Image</code> before rotating.`,name:"image"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L396",returnDescription:` <p>A rotated <code>PIL.Image.Image</code>.</p> `,returnType:` <p>image</p> `}}),Je=new I({props:{name:"to_numpy_array",anchor:"transformers.ImageFeatureExtractionMixin.to_numpy_array",parameters:[{name:"image",val:""},{name:"rescale",val:" = None"},{name:"channel_first",val:" = True"}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.to_numpy_array.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to a NumPy array.`,name:"image"},{anchor:"transformers.ImageFeatureExtractionMixin.to_numpy_array.rescale",description:`<strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). 
Will default to <code>True</code> if the image is a PIL Image or an array/tensor of integers, <code>False</code> otherwise.`,name:"rescale"},{anchor:"transformers.ImageFeatureExtractionMixin.to_numpy_array.channel_first",description:`<strong>channel_first</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to permute the dimensions of the image to put the channel dimension first.`,name:"channel_first"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L141"}}),Ke=new I({props:{name:"to_pil_image",anchor:"transformers.ImageFeatureExtractionMixin.to_pil_image",parameters:[{name:"image",val:""},{name:"rescale",val:" = None"}],parametersDescription:[{anchor:"transformers.ImageFeatureExtractionMixin.to_pil_image.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>numpy.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to the PIL Image format.`,name:"image"},{anchor:"transformers.ImageFeatureExtractionMixin.to_pil_image.rescale",description:`<strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to <code>True</code> if the image type is a floating type, <code>False</code> otherwise.`,name:"rescale"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L90"}}),{c(){m=a("meta"),k=c(),b=a("h1"),$=a("a"),T=a("span"),h(p.$$.fragment),F=c(),q=a("span"),j=s("Feature Extractor"),z=c(),D=a("p"),Ze=s(`A feature extractor is in charge of preparing input features for audio or vision models. This includes feature extraction from sequences, `),mt=a("em"),zr=s("e.g."),Dr=s(`, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images `),pt=a("em"),Mr=s("e.g."),Lr=s(` cropping image image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow tensors.`),Qt=c(),O=a("h2"),X=a("a"),ut=a("span"),h($e.$$.fragment),qr=c(),ft=a("span"),Nr=s("FeatureExtractionMixin"),Xt=c(),M=a("div"),h(Ee.$$.fragment),Sr=c(),ht=a("p"),Br=s(`This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors.`),Cr=c(),N=a("div"),h(we.$$.fragment),Ar=c(),B=a("p"),Vr=s("Instantiate a type of "),et=a("a"),jr=s("FeatureExtractionMixin"),Or=s(" from a feature extractor, "),gt=a("em"),Wr=s("e.g."),Rr=s(` a derived class of `),tt=a("a"),Ur=s("SequenceFeatureExtractor"),Hr=s("."),Gr=c(),h(Z.$$.fragment),Jr=c(),h(ee.$$.fragment),Yr=c(),te=a("div"),h(Fe.$$.fragment),Kr=c(),W=a("p"),Qr=s("Save a feature_extractor object to the directory "),_t=a("code"),Xr=s("save_directory"),Zr=s(`, so that it can be re-loaded using the `),rt=a("a"),ea=s("from_pretrained()"),ta=s(" class method."),Zt=c(),R=a("h2"),re=a("a"),vt=a("span"),h(Ie.$$.fragment),ra=c(),xt=a("span"),aa=s("SequenceFeatureExtractor"),er=c(),C=a("div"),h(Te.$$.fragment),oa=c(),yt=a("p"),na=s("This is a general feature extraction class for speech recognition."),sa=c(),S=a("div"),h(ke.$$.fragment),ia=c(),bt=a("p"),ca=s(`Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the max sequence length in the batch.`),da=c(),U=a("p"),la=s("Padding side (left/right) padding values are defined at the feature extractor level (with "),$t=a("code"),ma=s("self.padding_side"),pa=s(`, 
`),Et=a("code"),ua=s("self.padding_value"),fa=s(")"),ha=c(),h(ae.$$.fragment),tr=c(),H=a("h2"),oe=a("a"),wt=a("span"),h(Pe.$$.fragment),ga=c(),Ft=a("span"),_a=s("BatchFeature"),rr=c(),P=a("div"),h(ze.$$.fragment),va=c(),G=a("p"),xa=s("Holds the output of the "),at=a("a"),ya=s("pad()"),ba=s(" and feature extractor specific "),It=a("code"),$a=s("__call__"),Ea=s(" methods."),wa=c(),Tt=a("p"),Fa=s("This class is derived from a python dictionary and can be used as a dictionary."),Ia=c(),ne=a("div"),h(De.$$.fragment),Ta=c(),kt=a("p"),ka=s("Convert the inner content to tensors."),Pa=c(),se=a("div"),h(Me.$$.fragment),za=c(),Le=a("p"),Da=s("Send all values to device by calling "),Pt=a("code"),Ma=s("v.to(device)"),La=s(" (PyTorch only)."),ar=c(),J=a("h2"),ie=a("a"),zt=a("span"),h(qe.$$.fragment),qa=c(),Dt=a("span"),Na=s("ImageFeatureExtractionMixin"),or=c(),f=a("div"),h(Ne.$$.fragment),Sa=c(),Mt=a("p"),Ba=s("Mixin that contain utilities for preparing image features."),Ca=c(),ce=a("div"),h(Se.$$.fragment),Aa=c(),Be=a("p"),Va=s("Crops "),Lt=a("code"),ja=s("image"),Oa=s(` to the given size using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result has the size asked).`),Wa=c(),de=a("div"),h(Ce.$$.fragment),Ra=c(),Ae=a("p"),Ua=s("Converts "),qt=a("code"),Ha=s("PIL.Image.Image"),Ga=s(" to RGB format."),Ja=c(),le=a("div"),h(Ve.$$.fragment),Ya=c(),je=a("p"),Ka=s("Expands 2-dimensional "),Nt=a("code"),Qa=s("image"),Xa=s(" to 3 dimensions."),Za=c(),me=a("div"),h(Oe.$$.fragment),eo=c(),Y=a("p"),to=s("Flips the channel order of "),St=a("code"),ro=s("image"),ao=s(` from RGB to BGR, or vice versa. Note that this will trigger a conversion of `),Bt=a("code"),oo=s("image"),no=s(" to a NumPy array if it\u2019s a PIL Image."),so=c(),pe=a("div"),h(We.$$.fragment),io=c(),L=a("p"),co=s("Normalizes "),Ct=a("code"),lo=s("image"),mo=s(" with "),At=a("code"),po=s("mean"),uo=s(" and "),Vt=a("code"),fo=s("std"),ho=s(". Note that this will trigger a conversion of "),jt=a("code"),go=s("image"),_o=s(` to a NumPy array if it\u2019s a PIL Image.`),vo=c(),ue=a("div"),h(Re.$$.fragment),xo=c(),Ot=a("p"),yo=s("Rescale a numpy image by scale amount"),bo=c(),fe=a("div"),h(Ue.$$.fragment),$o=c(),He=a("p"),Eo=s("Resizes "),Wt=a("code"),wo=s("image"),Fo=s(". Enforces conversion of input to PIL.Image."),Io=c(),he=a("div"),h(Ge.$$.fragment),To=c(),K=a("p"),ko=s("Returns a rotated copy of "),Rt=a("code"),Po=s("image"),zo=s(". This method returns a copy of "),Ut=a("code"),Do=s("image"),Mo=s(`, rotated the given number of degrees counter clockwise around its centre.`),Lo=c(),ge=a("div"),h(Je.$$.fragment),qo=c(),Ye=a("p"),No=s("Converts "),Ht=a("code"),So=s("image"),Bo=s(` to a numpy array. Optionally rescales it and puts the channel dimension as the first dimension.`),Co=c(),_e=a("div"),h(Ke.$$.fragment),Ao=c(),Qe=a("p"),Vo=s("Converts "),Gt=a("code"),jo=s("image"),Oo=s(` to a PIL Image. 
Optionally rescales it and puts the channel dimension back as the last axis if needed.`),this.h()},l(t){const u=Vn('[data-svelte="svelte-1phssyn"]',document.head);m=o(u,"META",{name:!0,content:!0}),u.forEach(r),k=d(t),b=o(t,"H1",{class:!0});var Xe=n(b);$=o(Xe,"A",{id:!0,class:!0,href:!0});var Jt=n($);T=o(Jt,"SPAN",{});var Yt=n(T);g(p.$$.fragment,Yt),Yt.forEach(r),Jt.forEach(r),F=d(Xe),q=o(Xe,"SPAN",{});var Wo=n(q);j=i(Wo,"Feature Extractor"),Wo.forEach(r),Xe.forEach(r),z=d(t),D=o(t,"P",{});var ot=n(D);Ze=i(ot,`A feature extractor is in charge of preparing input features for audio or vision models. This includes feature extraction from sequences, `),mt=o(ot,"EM",{});var Ro=n(mt);zr=i(Ro,"e.g."),Ro.forEach(r),Dr=i(ot,`, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images `),pt=o(ot,"EM",{});var Uo=n(pt);Mr=i(Uo,"e.g."),Uo.forEach(r),Lr=i(ot,` cropping image image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow tensors.`),ot.forEach(r),Qt=d(t),O=o(t,"H2",{class:!0});var sr=n(O);X=o(sr,"A",{id:!0,class:!0,href:!0});var Ho=n(X);ut=o(Ho,"SPAN",{});var Go=n(ut);g($e.$$.fragment,Go),Go.forEach(r),Ho.forEach(r),qr=d(sr),ft=o(sr,"SPAN",{});var Jo=n(ft);Nr=i(Jo,"FeatureExtractionMixin"),Jo.forEach(r),sr.forEach(r),Xt=d(t),M=o(t,"DIV",{class:!0});var ve=n(M);g(Ee.$$.fragment,ve),Sr=d(ve),ht=o(ve,"P",{});var Yo=n(ht);Br=i(Yo,`This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors.`),Yo.forEach(r),Cr=d(ve),N=o(ve,"DIV",{class:!0});var xe=n(N);g(we.$$.fragment,xe),Ar=d(xe),B=o(xe,"P",{});var ye=n(B);Vr=i(ye,"Instantiate a type of "),et=o(ye,"A",{href:!0});var Ko=n(et);jr=i(Ko,"FeatureExtractionMixin"),Ko.forEach(r),Or=i(ye," from a feature extractor, "),gt=o(ye,"EM",{});var Qo=n(gt);Wr=i(Qo,"e.g."),Qo.forEach(r),Rr=i(ye,` a derived class of `),tt=o(ye,"A",{href:!0});var Xo=n(tt);Ur=i(Xo,"SequenceFeatureExtractor"),Xo.forEach(r),Hr=i(ye,"."),ye.forEach(r),Gr=d(xe),g(Z.$$.fragment,xe),Jr=d(xe),g(ee.$$.fragment,xe),xe.forEach(r),Yr=d(ve),te=o(ve,"DIV",{class:!0});var ir=n(te);g(Fe.$$.fragment,ir),Kr=d(ir),W=o(ir,"P",{});var nt=n(W);Qr=i(nt,"Save a feature_extractor object to the directory "),_t=o(nt,"CODE",{});var Zo=n(_t);Xr=i(Zo,"save_directory"),Zo.forEach(r),Zr=i(nt,`, so that it can be re-loaded using the `),rt=o(nt,"A",{href:!0});var en=n(rt);ea=i(en,"from_pretrained()"),en.forEach(r),ta=i(nt," class method."),nt.forEach(r),ir.forEach(r),ve.forEach(r),Zt=d(t),R=o(t,"H2",{class:!0});var cr=n(R);re=o(cr,"A",{id:!0,class:!0,href:!0});var tn=n(re);vt=o(tn,"SPAN",{});var rn=n(vt);g(Ie.$$.fragment,rn),rn.forEach(r),tn.forEach(r),ra=d(cr),xt=o(cr,"SPAN",{});var an=n(xt);aa=i(an,"SequenceFeatureExtractor"),an.forEach(r),cr.forEach(r),er=d(t),C=o(t,"DIV",{class:!0});var st=n(C);g(Te.$$.fragment,st),oa=d(st),yt=o(st,"P",{});var on=n(yt);na=i(on,"This is a general feature extraction class for speech recognition."),on.forEach(r),sa=d(st),S=o(st,"DIV",{class:!0});var be=n(S);g(ke.$$.fragment,be),ia=d(be),bt=o(be,"P",{});var nn=n(bt);ca=i(nn,`Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the max sequence length in the batch.`),nn.forEach(r),da=d(be),U=o(be,"P",{});var it=n(U);la=i(it,"Padding side (left/right) padding values are defined at the feature extractor level (with "),$t=o(it,"CODE",{});var sn=n($t);ma=i(sn,"self.padding_side"),sn.forEach(r),pa=i(it,`, `),Et=o(it,"CODE",{});var 
cn=n(Et);ua=i(cn,"self.padding_value"),cn.forEach(r),fa=i(it,")"),it.forEach(r),ha=d(be),g(ae.$$.fragment,be),be.forEach(r),st.forEach(r),tr=d(t),H=o(t,"H2",{class:!0});var dr=n(H);oe=o(dr,"A",{id:!0,class:!0,href:!0});var dn=n(oe);wt=o(dn,"SPAN",{});var ln=n(wt);g(Pe.$$.fragment,ln),ln.forEach(r),dn.forEach(r),ga=d(dr),Ft=o(dr,"SPAN",{});var mn=n(Ft);_a=i(mn,"BatchFeature"),mn.forEach(r),dr.forEach(r),rr=d(t),P=o(t,"DIV",{class:!0});var A=n(P);g(ze.$$.fragment,A),va=d(A),G=o(A,"P",{});var ct=n(G);xa=i(ct,"Holds the output of the "),at=o(ct,"A",{href:!0});var pn=n(at);ya=i(pn,"pad()"),pn.forEach(r),ba=i(ct," and feature extractor specific "),It=o(ct,"CODE",{});var un=n(It);$a=i(un,"__call__"),un.forEach(r),Ea=i(ct," methods."),ct.forEach(r),wa=d(A),Tt=o(A,"P",{});var fn=n(Tt);Fa=i(fn,"This class is derived from a python dictionary and can be used as a dictionary."),fn.forEach(r),Ia=d(A),ne=o(A,"DIV",{class:!0});var lr=n(ne);g(De.$$.fragment,lr),Ta=d(lr),kt=o(lr,"P",{});var hn=n(kt);ka=i(hn,"Convert the inner content to tensors."),hn.forEach(r),lr.forEach(r),Pa=d(A),se=o(A,"DIV",{class:!0});var mr=n(se);g(Me.$$.fragment,mr),za=d(mr),Le=o(mr,"P",{});var pr=n(Le);Da=i(pr,"Send all values to device by calling "),Pt=o(pr,"CODE",{});var gn=n(Pt);Ma=i(gn,"v.to(device)"),gn.forEach(r),La=i(pr," (PyTorch only)."),pr.forEach(r),mr.forEach(r),A.forEach(r),ar=d(t),J=o(t,"H2",{class:!0});var ur=n(J);ie=o(ur,"A",{id:!0,class:!0,href:!0});var _n=n(ie);zt=o(_n,"SPAN",{});var vn=n(zt);g(qe.$$.fragment,vn),vn.forEach(r),_n.forEach(r),qa=d(ur),Dt=o(ur,"SPAN",{});var xn=n(Dt);Na=i(xn,"ImageFeatureExtractionMixin"),xn.forEach(r),ur.forEach(r),or=d(t),f=o(t,"DIV",{class:!0});var E=n(f);g(Ne.$$.fragment,E),Sa=d(E),Mt=o(E,"P",{});var yn=n(Mt);Ba=i(yn,"Mixin that contain utilities for preparing image features."),yn.forEach(r),Ca=d(E),ce=o(E,"DIV",{class:!0});var fr=n(ce);g(Se.$$.fragment,fr),Aa=d(fr),Be=o(fr,"P",{});var hr=n(Be);Va=i(hr,"Crops "),Lt=o(hr,"CODE",{});var bn=n(Lt);ja=i(bn,"image"),bn.forEach(r),Oa=i(hr,` to the given size using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result has the size asked).`),hr.forEach(r),fr.forEach(r),Wa=d(E),de=o(E,"DIV",{class:!0});var gr=n(de);g(Ce.$$.fragment,gr),Ra=d(gr),Ae=o(gr,"P",{});var _r=n(Ae);Ua=i(_r,"Converts "),qt=o(_r,"CODE",{});var $n=n(qt);Ha=i($n,"PIL.Image.Image"),$n.forEach(r),Ga=i(_r," to RGB format."),_r.forEach(r),gr.forEach(r),Ja=d(E),le=o(E,"DIV",{class:!0});var vr=n(le);g(Ve.$$.fragment,vr),Ya=d(vr),je=o(vr,"P",{});var xr=n(je);Ka=i(xr,"Expands 2-dimensional "),Nt=o(xr,"CODE",{});var En=n(Nt);Qa=i(En,"image"),En.forEach(r),Xa=i(xr," to 3 dimensions."),xr.forEach(r),vr.forEach(r),Za=d(E),me=o(E,"DIV",{class:!0});var yr=n(me);g(Oe.$$.fragment,yr),eo=d(yr),Y=o(yr,"P",{});var dt=n(Y);to=i(dt,"Flips the channel order of "),St=o(dt,"CODE",{});var wn=n(St);ro=i(wn,"image"),wn.forEach(r),ao=i(dt,` from RGB to BGR, or vice versa. Note that this will trigger a conversion of `),Bt=o(dt,"CODE",{});var Fn=n(Bt);oo=i(Fn,"image"),Fn.forEach(r),no=i(dt," to a NumPy array if it\u2019s a PIL Image."),dt.forEach(r),yr.forEach(r),so=d(E),pe=o(E,"DIV",{class:!0});var br=n(pe);g(We.$$.fragment,br),io=d(br),L=o(br,"P",{});var V=n(L);co=i(V,"Normalizes "),Ct=o(V,"CODE",{});var In=n(Ct);lo=i(In,"image"),In.forEach(r),mo=i(V," with "),At=o(V,"CODE",{});var Tn=n(At);po=i(Tn,"mean"),Tn.forEach(r),uo=i(V," and "),Vt=o(V,"CODE",{});var kn=n(Vt);fo=i(kn,"std"),kn.forEach(r),ho=i(V,". 
Note that this will trigger a conversion of "),jt=o(V,"CODE",{});var Pn=n(jt);go=i(Pn,"image"),Pn.forEach(r),_o=i(V,` to a NumPy array if it\u2019s a PIL Image.`),V.forEach(r),br.forEach(r),vo=d(E),ue=o(E,"DIV",{class:!0});var $r=n(ue);g(Re.$$.fragment,$r),xo=d($r),Ot=o($r,"P",{});var zn=n(Ot);yo=i(zn,"Rescale a numpy image by scale amount"),zn.forEach(r),$r.forEach(r),bo=d(E),fe=o(E,"DIV",{class:!0});var Er=n(fe);g(Ue.$$.fragment,Er),$o=d(Er),He=o(Er,"P",{});var wr=n(He);Eo=i(wr,"Resizes "),Wt=o(wr,"CODE",{});var Dn=n(Wt);wo=i(Dn,"image"),Dn.forEach(r),Fo=i(wr,". Enforces conversion of input to PIL.Image."),wr.forEach(r),Er.forEach(r),Io=d(E),he=o(E,"DIV",{class:!0});var Fr=n(he);g(Ge.$$.fragment,Fr),To=d(Fr),K=o(Fr,"P",{});var lt=n(K);ko=i(lt,"Returns a rotated copy of "),Rt=o(lt,"CODE",{});var Mn=n(Rt);Po=i(Mn,"image"),Mn.forEach(r),zo=i(lt,". This method returns a copy of "),Ut=o(lt,"CODE",{});var Ln=n(Ut);Do=i(Ln,"image"),Ln.forEach(r),Mo=i(lt,`, rotated the given number of degrees counter clockwise around its centre.`),lt.forEach(r),Fr.forEach(r),Lo=d(E),ge=o(E,"DIV",{class:!0});var Ir=n(ge);g(Je.$$.fragment,Ir),qo=d(Ir),Ye=o(Ir,"P",{});var Tr=n(Ye);No=i(Tr,"Converts "),Ht=o(Tr,"CODE",{});var qn=n(Ht);So=i(qn,"image"),qn.forEach(r),Bo=i(Tr,` to a numpy array. Optionally rescales it and puts the channel dimension as the first dimension.`),Tr.forEach(r),Ir.forEach(r),Co=d(E),_e=o(E,"DIV",{class:!0});var kr=n(_e);g(Ke.$$.fragment,kr),Ao=d(kr),Qe=o(kr,"P",{});var Pr=n(Qe);Vo=i(Pr,"Converts "),Gt=o(Pr,"CODE",{});var Nn=n(Gt);jo=i(Nn,"image"),Nn.forEach(r),Oo=i(Pr,` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed.`),Pr.forEach(r),kr.forEach(r),E.forEach(r),this.h()},h(){l(m,"name","hf:doc:metadata"),l(m,"content",JSON.stringify(Yn)),l($,"id","feature-extractor"),l($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l($,"href","#feature-extractor"),l(b,"class","relative group"),l(X,"id","transformers.FeatureExtractionMixin"),l(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(X,"href","#transformers.FeatureExtractionMixin"),l(O,"class","relative group"),l(et,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin"),l(tt,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor"),l(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(rt,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained"),l(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(re,"id","transformers.SequenceFeatureExtractor"),l(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(re,"href","#transformers.SequenceFeatureExtractor"),l(R,"class","relative group"),l(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl 
mb-6 mt-8"),l(oe,"id","transformers.BatchFeature"),l(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(oe,"href","#transformers.BatchFeature"),l(H,"class","relative group"),l(at,"href","/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad"),l(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ie,"id","transformers.ImageFeatureExtractionMixin"),l(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(ie,"href","#transformers.ImageFeatureExtractionMixin"),l(J,"class","relative group"),l(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(f,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(t,u){e(document.head,m),w(t,k,u),w(t,b,u),e(b,$),e($,T),_(p,T,null),e(b,F),e(b,q),e(q,j),w(t,z,u),w(t,D,u),e(D,Ze),e(D,mt),e(mt,zr),e(D,Dr),e(D,pt),e(pt,Mr),e(D,Lr),w(t,Qt,u),w(t,O,u),e(O,X),e(X,ut),_($e,ut,null),e(O,qr),e(O,ft),e(ft,Nr),w(t,Xt,u),w(t,M,u),_(Ee,M,null),e(M,Sr),e(M,ht),e(ht,Br),e(M,Cr),e(M,N),_(we,N,null),e(N,Ar),e(N,B),e(B,Vr),e(B,et),e(et,jr),e(B,Or),e(B,gt),e(gt,Wr),e(B,Rr),e(B,tt),e(tt,Ur),e(B,Hr),e(N,Gr),_(Z,N,null),e(N,Jr),_(ee,N,null),e(M,Yr),e(M,te),_(Fe,te,null),e(te,Kr),e(te,W),e(W,Qr),e(W,_t),e(_t,Xr),e(W,Zr),e(W,rt),e(rt,ea),e(W,ta),w(t,Zt,u),w(t,R,u),e(R,re),e(re,vt),_(Ie,vt,null),e(R,ra),e(R,xt),e(xt,aa),w(t,er,u),w(t,C,u),_(Te,C,null),e(C,oa),e(C,yt),e(yt,na),e(C,sa),e(C,S),_(ke,S,null),e(S,ia),e(S,bt),e(bt,ca),e(S,da),e(S,U),e(U,la),e(U,$t),e($t,ma),e(U,pa),e(U,Et),e(Et,ua),e(U,fa),e(S,ha),_(ae,S,null),w(t,tr,u),w(t,H,u),e(H,oe),e(oe,wt),_(Pe,wt,null),e(H,ga),e(H,Ft),e(Ft,_a),w(t,rr,u),w(t,P,u),_(ze,P,null),e(P,va),e(P,G),e(G,xa),e(G,at),e(at,ya),e(G,ba),e(G,It),e(It,$a),e(G,Ea),e(P,wa),e(P,Tt),e(Tt,Fa),e(P,Ia),e(P,ne),_(De,ne,null),e(ne,Ta),e(ne,kt),e(kt,ka),e(P,Pa),e(P,se),_(Me,se,null),e(se,za),e(se,Le),e(Le,Da),e(Le,Pt),e(Pt,Ma),e(Le,La),w(t,ar,u),w(t,J,u),e(J,ie),e(ie,zt),_(qe,zt,null),e(J,qa),e(J,Dt),e(Dt,Na),w(t,or,u),w(t,f,u),_(Ne,f,null),e(f,Sa),e(f,Mt),e(Mt,Ba),e(f,Ca),e(f,ce),_(Se,ce,null),e(ce,Aa),e(ce,Be),e(Be,Va),e(Be,Lt),e(Lt,ja),e(Be,Oa),e(f,Wa),e(f,de),_(Ce,de,null),e(de,Ra),e(de,Ae),e(Ae,Ua),e(Ae,qt),e(qt,Ha),e(Ae,Ga),e(f,Ja),e(f,le),_(Ve,le,null),e(le,Ya),e(le,je),e(je,Ka),e(je,Nt),e(Nt,Qa),e(je,Xa),e(f,Za),e(f,me),_(Oe,me,null),e(me,eo),e(me,Y),e(Y,to),e(Y,St),e(St,ro),e(Y,ao),e(Y,Bt),e(Bt,oo),e(Y,no),e(f,so),e(f,pe),_(We,pe,null),e(pe,io),e(pe,L),e(L,co),e(L,Ct),e(Ct,lo),e(L,mo),e(L,At),e(At,po),e(L,uo),e(L,Vt),e(Vt,fo),e(L,ho),e(L,jt),e(jt,go),e(L,_o),e(f,vo),e(f,ue),_(Re,ue,null),e(ue,xo),e(ue,Ot),e(Ot,yo),e(f,bo),e(f,fe),_(Ue,fe,null),e(fe,$o),e(fe,He),e(He,Eo),e(He,Wt),e(Wt,wo),e(He,Fo),e(f,Io),e(f,he),_(Ge,he,null),e(he,To),e(he,K),e(K,ko),e(K,Rt),e(Rt,Po),e(K,zo),e(K,Ut),e(Ut,Do),e(K,Mo),e(f,Lo),e(f,ge),_(Je,ge,null),e(ge,qo),e(ge,Ye),e(Ye,No),e(Ye,Ht),e(Ht,So),e(Ye,Bo),e(f,Co),e(f,_e),_(Ke,_e,null),e(_e,Ao),e(_e,Qe),e(Qe,Vo),e(Qe,Gt),e(Gt,jo),e(Qe,Oo),nr=!0},p(t,[u]){const Xe={};u&2&&(Xe.$$scope={dirty:u,ctx:t}),Z.$set(Xe);const Jt={};u&2&&(Jt.$$scope={dirty:u,ctx:t}),ee.$set(Jt);const 
Yt={};u&2&&(Yt.$$scope={dirty:u,ctx:t}),ae.$set(Yt)},i(t){nr||(v(p.$$.fragment,t),v($e.$$.fragment,t),v(Ee.$$.fragment,t),v(we.$$.fragment,t),v(Z.$$.fragment,t),v(ee.$$.fragment,t),v(Fe.$$.fragment,t),v(Ie.$$.fragment,t),v(Te.$$.fragment,t),v(ke.$$.fragment,t),v(ae.$$.fragment,t),v(Pe.$$.fragment,t),v(ze.$$.fragment,t),v(De.$$.fragment,t),v(Me.$$.fragment,t),v(qe.$$.fragment,t),v(Ne.$$.fragment,t),v(Se.$$.fragment,t),v(Ce.$$.fragment,t),v(Ve.$$.fragment,t),v(Oe.$$.fragment,t),v(We.$$.fragment,t),v(Re.$$.fragment,t),v(Ue.$$.fragment,t),v(Ge.$$.fragment,t),v(Je.$$.fragment,t),v(Ke.$$.fragment,t),nr=!0)},o(t){x(p.$$.fragment,t),x($e.$$.fragment,t),x(Ee.$$.fragment,t),x(we.$$.fragment,t),x(Z.$$.fragment,t),x(ee.$$.fragment,t),x(Fe.$$.fragment,t),x(Ie.$$.fragment,t),x(Te.$$.fragment,t),x(ke.$$.fragment,t),x(ae.$$.fragment,t),x(Pe.$$.fragment,t),x(ze.$$.fragment,t),x(De.$$.fragment,t),x(Me.$$.fragment,t),x(qe.$$.fragment,t),x(Ne.$$.fragment,t),x(Se.$$.fragment,t),x(Ce.$$.fragment,t),x(Ve.$$.fragment,t),x(Oe.$$.fragment,t),x(We.$$.fragment,t),x(Re.$$.fragment,t),x(Ue.$$.fragment,t),x(Ge.$$.fragment,t),x(Je.$$.fragment,t),x(Ke.$$.fragment,t),nr=!1},d(t){r(m),t&&r(k),t&&r(b),y(p),t&&r(z),t&&r(D),t&&r(Qt),t&&r(O),y($e),t&&r(Xt),t&&r(M),y(Ee),y(we),y(Z),y(ee),y(Fe),t&&r(Zt),t&&r(R),y(Ie),t&&r(er),t&&r(C),y(Te),y(ke),y(ae),t&&r(tr),t&&r(H),y(Pe),t&&r(rr),t&&r(P),y(ze),y(De),y(Me),t&&r(ar),t&&r(J),y(qe),t&&r(or),t&&r(f),y(Ne),y(Se),y(Ce),y(Ve),y(Oe),y(We),y(Re),y(Ue),y(Ge),y(Je),y(Ke)}}}const Yn={local:"feature-extractor",sections:[{local:"transformers.FeatureExtractionMixin",title:"FeatureExtractionMixin"},{local:"transformers.SequenceFeatureExtractor",title:"SequenceFeatureExtractor"},{local:"transformers.BatchFeature",title:"BatchFeature"},{local:"transformers.ImageFeatureExtractionMixin",title:"ImageFeatureExtractionMixin"}],title:"Feature Extractor"};function Kn(Q){return jn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class as extends Bn{constructor(m){super();Cn(this,m,Kn,Jn,An,{})}}export{as as default,Yn as metadata};
25
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/asr.mdx-hf-doc-builder.js
import{S as Ue,i as Fe,s as Ye,e as o,k as u,w,t as e,M as Ve,c as r,d as t,m as d,a as i,x as y,h as n,b as m,G as a,g as p,y as k,q as x,o as E,B as A,v as Ne}from"../../chunks/vendor-hf-doc-builder.js";import{T as ee}from"../../chunks/Tip-hf-doc-builder.js";import{Y as He}from"../../chunks/Youtube-hf-doc-builder.js";import{I as et}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as U}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as Be,M as Ge}from"../../chunks/Markdown-hf-doc-builder.js";function Je(F){let c,j,h,$,v;return{c(){c=o("p"),j=e("See the automatic speech recognition "),h=o("a"),$=e("task page"),v=e(" for more information about its associated models, datasets, and metrics."),this.h()},l(g){c=r(g,"P",{});var q=i(c);j=n(q,"See the automatic speech recognition "),h=r(q,"A",{href:!0,rel:!0});var T=i(h);$=n(T,"task page"),T.forEach(t),v=n(q," for more information about its associated models, datasets, and metrics."),q.forEach(t),this.h()},h(){m(h,"href","https://huggingface.co/tasks/automatic-speech-recognition"),m(h,"rel","nofollow")},m(g,q){p(g,c,q),a(c,j),a(c,h),a(h,$),a(c,v)},d(g){g&&t(c)}}}function Ke(F){let c,j,h,$,v,g,q,T;return{c(){c=o("p"),j=e("If you aren\u2019t familiar with fine-tuning a model with the "),h=o("a"),$=e("Trainer"),v=e(", take a look at the basic tutorial "),g=o("a"),q=e("here"),T=e("!"),this.h()},l(P){c=r(P,"P",{});var _=i(c);j=n(_,"If you aren\u2019t familiar with fine-tuning a model with the "),h=r(_,"A",{href:!0});var D=i(h);$=n(D,"Trainer"),D.forEach(t),v=n(_,", take a look at the basic tutorial "),g=r(_,"A",{href:!0});var C=i(g);q=n(C,"here"),C.forEach(t),T=n(_,"!"),_.forEach(t),this.h()},h(){m(h,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(g,"href","../training#finetune-with-trainer")},m(P,_){p(P,c,_),a(c,j),a(c,h),a(h,$),a(c,v),a(c,g),a(g,q),a(c,T)},d(P){P&&t(c)}}}function Qe(F){let c,j,h,$,v,g,q,T,P,_,D,C,K,as,us,S,R,I,ys,ts,Q,ks,xs,Y,V,X,M,N,cs,L,Es,H,As,ds,W,Z,B;return _=new U({props:{code:`from transformers import AutoModelForCTC, TrainingArguments, Trainer model = AutoModelForCTC.from_pretrained( "facebook/wav2vec2-base", ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCTC, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, <span class="hljs-meta">... </span> ctc_loss_reduction=<span class="hljs-string">&quot;mean&quot;</span>, <span class="hljs-meta">... </span> pad_token_id=processor.tokenizer.pad_token_id, <span class="hljs-meta">... </span>)`}}),C=new ee({props:{$$slots:{default:[Ke]},$$scope:{ctx:F}}}),Z=new U({props:{code:`training_args = TrainingArguments( output_dir="./results", group_by_length=True, per_device_train_batch_size=16, evaluation_strategy="steps", num_train_epochs=3, fp16=True, gradient_checkpointing=True, learning_rate=1e-4, weight_decay=0.005, save_total_limit=2, ) trainer = Trainer( model=model, args=training_args, train_dataset=encoded_minds["train"], eval_dataset=encoded_minds["test"], tokenizer=processor.feature_extractor, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... 
</span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> group_by_length=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> gradient_checkpointing=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">1e-4</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.005</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=encoded_minds[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=encoded_minds[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=processor.feature_extractor, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){c=o("p"),j=e("Load Wav2Vec2 with "),h=o("a"),$=e("AutoModelForCTC"),v=e(". For "),g=o("code"),q=e("ctc_loss_reduction"),T=e(", it is often better to use the average instead of the default summation:"),P=u(),w(_.$$.fragment),D=u(),w(C.$$.fragment),K=u(),as=o("p"),us=e("At this point, only three steps remain:"),S=u(),R=o("ol"),I=o("li"),ys=e("Define your training hyperparameters in "),ts=o("a"),Q=e("TrainingArguments"),ks=e("."),xs=u(),Y=o("li"),V=e("Pass the training arguments to "),X=o("a"),M=e("Trainer"),N=e(" along with the model, datasets, tokenizer, and data collator."),cs=u(),L=o("li"),Es=e("Call "),H=o("a"),As=e("train()"),ds=e(" to fine-tune your model."),W=u(),w(Z.$$.fragment),this.h()},l(f){c=r(f,"P",{});var b=i(c);j=n(b,"Load Wav2Vec2 with "),h=r(b,"A",{href:!0});var es=i(h);$=n(es,"AutoModelForCTC"),es.forEach(t),v=n(b,". 
For "),g=r(b,"CODE",{});var ns=i(g);q=n(ns,"ctc_loss_reduction"),ns.forEach(t),T=n(b,", it is often better to use the average instead of the default summation:"),b.forEach(t),P=d(f),y(_.$$.fragment,f),D=d(f),y(C.$$.fragment,f),K=d(f),as=r(f,"P",{});var qs=i(as);us=n(qs,"At this point, only three steps remain:"),qs.forEach(t),S=d(f),R=r(f,"OL",{});var z=i(R);I=r(z,"LI",{});var ms=i(I);ys=n(ms,"Define your training hyperparameters in "),ts=r(ms,"A",{href:!0});var Ts=i(ts);Q=n(Ts,"TrainingArguments"),Ts.forEach(t),ks=n(ms,"."),ms.forEach(t),xs=d(z),Y=r(z,"LI",{});var G=i(Y);V=n(G,"Pass the training arguments to "),X=r(G,"A",{href:!0});var Ps=i(X);M=n(Ps,"Trainer"),Ps.forEach(t),N=n(G," along with the model, datasets, tokenizer, and data collator."),G.forEach(t),cs=d(z),L=r(z,"LI",{});var ss=i(L);Es=n(ss,"Call "),H=r(ss,"A",{href:!0});var Bs=i(H);As=n(Bs,"train()"),Bs.forEach(t),ds=n(ss," to fine-tune your model."),ss.forEach(t),z.forEach(t),W=d(f),y(Z.$$.fragment,f),this.h()},h(){m(h,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForCTC"),m(ts,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),m(X,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),m(H,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(f,b){p(f,c,b),a(c,j),a(c,h),a(h,$),a(c,v),a(c,g),a(g,q),a(c,T),p(f,P,b),k(_,f,b),p(f,D,b),k(C,f,b),p(f,K,b),p(f,as,b),a(as,us),p(f,S,b),p(f,R,b),a(R,I),a(I,ys),a(I,ts),a(ts,Q),a(I,ks),a(R,xs),a(R,Y),a(Y,V),a(Y,X),a(X,M),a(Y,N),a(R,cs),a(R,L),a(L,Es),a(L,H),a(H,As),a(L,ds),p(f,W,b),k(Z,f,b),B=!0},p(f,b){const es={};b&2&&(es.$$scope={dirty:b,ctx:f}),C.$set(es)},i(f){B||(x(_.$$.fragment,f),x(C.$$.fragment,f),x(Z.$$.fragment,f),B=!0)},o(f){E(_.$$.fragment,f),E(C.$$.fragment,f),E(Z.$$.fragment,f),B=!1},d(f){f&&t(c),f&&t(P),A(_,f),f&&t(D),A(C,f),f&&t(K),f&&t(as),f&&t(S),f&&t(R),f&&t(W),A(Z,f)}}}function Xe(F){let c,j;return c=new Ge({props:{$$slots:{default:[Qe]},$$scope:{ctx:F}}}),{c(){w(c.$$.fragment)},l(h){y(c.$$.fragment,h)},m(h,$){k(c,h,$),j=!0},p(h,$){const v={};$&2&&(v.$$scope={dirty:$,ctx:h}),c.$set(v)},i(h){j||(x(c.$$.fragment,h),j=!0)},o(h){E(c.$$.fragment,h),j=!1},d(h){A(c,h)}}}function Ze(F){let c,j,h,$,v,g,q,T;return{c(){c=o("p"),j=e("For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look at this blog "),h=o("a"),$=e("post"),v=e(" for English ASR and this "),g=o("a"),q=e("post"),T=e(" for multilingual ASR."),this.h()},l(P){c=r(P,"P",{});var _=i(c);j=n(_,"For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look at this blog "),h=r(_,"A",{href:!0,rel:!0});var D=i(h);$=n(D,"post"),D.forEach(t),v=n(_," for English ASR and this "),g=r(_,"A",{href:!0,rel:!0});var C=i(g);q=n(C,"post"),C.forEach(t),T=n(_," for multilingual ASR."),_.forEach(t),this.h()},h(){m(h,"href","https://huggingface.co/blog/fine-tune-wav2vec2-english"),m(h,"rel","nofollow"),m(g,"href","https://huggingface.co/blog/fine-tune-xlsr-wav2vec2"),m(g,"rel","nofollow")},m(P,_){p(P,c,_),a(c,j),a(c,h),a(h,$),a(c,v),a(c,g),a(g,q),a(c,T)},d(P){P&&t(c)}}}function sn(F){let 
c,j,h,$,v,g,q,T,P,_,D,C,K,as,us,S,R,I,ys,ts,Q,ks,xs,Y,V,X,M,N,cs,L,Es,H,As,ds,W,Z,B,f,b,es,ns,qs,z,ms,Ts,G,Ps,ss,Bs,ka,Cs,xa,O,nt,ea,lt,ot,na,rt,it,la,pt,ct,oa,ht,ft,Ea,Ds,Aa,Gs,ut,qa,Ss,Ta,ls,dt,ra,mt,gt,ia,_t,$t,Pa,hs,gs,pa,Is,jt,ca,bt,Ca,Js,vt,Da,Ls,Sa,_s,wt,Rs,yt,kt,Ia,Os,La,Ks,xt,Ra,os,Ms,Et,ha,At,qt,Tt,Ws,Pt,fa,Ct,Dt,St,ua,It,Oa,zs,Ma,rs,Lt,Us,Rt,Ot,da,Mt,Wt,Wa,Fs,za,J,zt,Qs,Ut,Ft,ma,Yt,Vt,ga,Nt,Ht,Ua,is,Bt,_a,Gt,Jt,$a,Kt,Qt,Fa,Ys,Ya,$s,Xt,ja,Zt,se,Va,Vs,Na,fs,js,ba,Ns,ae,va,te,Ha,bs,Ba,vs,Ga;return g=new et({}),D=new He({props:{id:"TksaY_FDgnk"}}),V=new ee({props:{$$slots:{default:[Je]},$$scope:{ctx:F}}}),L=new et({}),ns=new U({props:{code:`from datasets import load_dataset, Audio minds = load_dataset("PolyAI/minds14", name="en-US", split="train")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>minds = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)`}}),G=new U({props:{code:"minds = minds.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),Cs=new U({props:{code:"minds",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds DatasetDict({ train: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">450</span> }) test: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">113</span> }) })`}}),Ds=new U({props:{code:'minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"])',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.remove_columns([<span class="hljs-string">&quot;english_transcription&quot;</span>, <span class="hljs-string">&quot;intent_class&quot;</span>, <span class="hljs-string">&quot;lang_id&quot;</span>])'}}),Ss=new U({props:{code:'minds["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">0.00024414</span>, <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., <span class="hljs-number">0.00024414</span>, <span class="hljs-number">0.00024414</span>, <span class="hljs-number">0.00024414</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span 
class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">8000</span>}, <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>: <span class="hljs-string">&quot;hi I&#x27;m trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing&quot;</span>}`}}),Is=new et({}),Ls=new U({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)`}}),Os=new U({props:{code:`minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) minds["train"][0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">2.38064706e-04</span>, -<span class="hljs-number">1.58618059e-04</span>, -<span class="hljs-number">5.43987835e-06</span>, ..., <span class="hljs-number">2.78103951e-04</span>, <span class="hljs-number">2.38446111e-04</span>, <span class="hljs-number">1.18740834e-04</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>: <span class="hljs-string">&quot;hi I&#x27;m trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing&quot;</span>}`}}),zs=new U({props:{code:`def prepare_dataset(batch): audio = batch["audio"] batch = processor(audio=audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["input_length"] = len(batch["input_values"]) batch["labels"] = processor(text=batch["transcription"]).input_ids return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">prepare_dataset</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... 
</span> audio = batch[<span class="hljs-string">&quot;audio&quot;</span>] <span class="hljs-meta">... </span> batch = processor(audio=audio[<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=audio[<span class="hljs-string">&quot;sampling_rate&quot;</span>]).input_values[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;input_length&quot;</span>] = <span class="hljs-built_in">len</span>(batch[<span class="hljs-string">&quot;input_values&quot;</span>]) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = processor(text=batch[<span class="hljs-string">&quot;transcription&quot;</span>]).input_ids <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`}}),Fs=new U({props:{code:'encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = minds.<span class="hljs-built_in">map</span>(prepare_dataset, remove_columns=minds.column_names[<span class="hljs-string">&quot;train&quot;</span>], num_proc=<span class="hljs-number">4</span>)'}}),Ys=new U({props:{code:`import torch from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union @dataclass class DataCollatorCTCWithPadding: processor: AutoProcessor padding: Union[bool, str] = True def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt") labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass, field <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Any</span>, <span class="hljs-type">Dict</span>, <span class="hljs-type">List</span>, <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorCTCWithPadding</span>: <span class="hljs-meta">... </span> processor: AutoProcessor <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>] = <span class="hljs-literal">True</span> <span class="hljs-meta">... 
</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features: <span class="hljs-type">List</span>[<span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, <span class="hljs-type">Union</span>[<span class="hljs-type">List</span>[<span class="hljs-built_in">int</span>], torch.Tensor]]]</span>) -&gt; <span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, torch.Tensor]: <span class="hljs-meta">... </span> <span class="hljs-comment"># split inputs and labels since they have to be of different lengths and need</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># different padding methods</span> <span class="hljs-meta">... </span> input_features = [{<span class="hljs-string">&quot;input_values&quot;</span>: feature[<span class="hljs-string">&quot;input_values&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> label_features = [{<span class="hljs-string">&quot;input_ids&quot;</span>: feature[<span class="hljs-string">&quot;labels&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch = self.processor.pad(input_features, padding=self.padding, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">... </span> labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">... </span> <span class="hljs-comment"># replace padding with -100 to ignore loss correctly</span> <span class="hljs-meta">... </span> labels = labels_batch[<span class="hljs-string">&quot;input_ids&quot;</span>].masked_fill(labels_batch.attention_mask.ne(<span class="hljs-number">1</span>), -<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`}}),Vs=new U({props:{code:"data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorCTCWithPadding(processor=processor, padding=<span class="hljs-literal">True</span>)'}}),Ns=new et({}),bs=new Be({props:{pytorch:!0,tensorflow:!1,jax:!1,$$slots:{pytorch:[Xe]},$$scope:{ctx:F}}}),vs=new ee({props:{$$slots:{default:[Ze]},$$scope:{ctx:F}}}),{c(){c=o("meta"),j=u(),h=o("h1"),$=o("a"),v=o("span"),w(g.$$.fragment),q=u(),T=o("span"),P=e("Automatic speech recognition"),_=u(),w(D.$$.fragment),C=u(),K=o("p"),as=e("Automatic speech recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. 
Voice assistants like Siri and Alexa utilize ASR models to assist users."),us=u(),S=o("p"),R=e("This guide will show you how to fine-tune "),I=o("a"),ys=e("Wav2Vec2"),ts=e(" on the "),Q=o("a"),ks=e("MInDS-14"),xs=e(" dataset to transcribe audio to text."),Y=u(),w(V.$$.fragment),X=u(),M=o("h2"),N=o("a"),cs=o("span"),w(L.$$.fragment),Es=u(),H=o("span"),As=e("Load MInDS-14 dataset"),ds=u(),W=o("p"),Z=e("Load the "),B=o("a"),f=e("MInDS-14"),b=e(" from the \u{1F917} Datasets library:"),es=u(),w(ns.$$.fragment),qs=u(),z=o("p"),ms=e("Split this dataset into a train and test set:"),Ts=u(),w(G.$$.fragment),Ps=u(),ss=o("p"),Bs=e("Then take a look at the dataset:"),ka=u(),w(Cs.$$.fragment),xa=u(),O=o("p"),nt=e("While the dataset contains a lot of helpful information, like "),ea=o("code"),lt=e("lang_id"),ot=e(" and "),na=o("code"),rt=e("intent_class"),it=e(", you will focus on the "),la=o("code"),pt=e("audio"),ct=e(" and "),oa=o("code"),ht=e("transcription"),ft=e(" columns in this guide. Remove the other columns:"),Ea=u(),w(Ds.$$.fragment),Aa=u(),Gs=o("p"),ut=e("Take a look at the example again:"),qa=u(),w(Ss.$$.fragment),Ta=u(),ls=o("p"),dt=e("The "),ra=o("code"),mt=e("audio"),gt=e(" column contains a 1-dimensional "),ia=o("code"),_t=e("array"),$t=e(" of the speech signal that must be called to load and resample the audio file."),Pa=u(),hs=o("h2"),gs=o("a"),pa=o("span"),w(Is.$$.fragment),jt=u(),ca=o("span"),bt=e("Preprocess"),Ca=u(),Js=o("p"),vt=e("Load the Wav2Vec2 processor to process the audio signal and transcribed text:"),Da=u(),w(Ls.$$.fragment),Sa=u(),_s=o("p"),wt=e("The "),Rs=o("a"),yt=e("MInDS-14"),kt=e(" dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:"),Ia=u(),w(Os.$$.fragment),La=u(),Ks=o("p"),xt=e("The preprocessing function needs to:"),Ra=u(),os=o("ol"),Ms=o("li"),Et=e("Call the "),ha=o("code"),At=e("audio"),qt=e(" column to load and resample the audio file."),Tt=u(),Ws=o("li"),Pt=e("Extract the "),fa=o("code"),Ct=e("input_values"),Dt=e(" from the audio file."),St=u(),ua=o("li"),It=e("Typically, when you call the processor, you call the feature extractor. Since you also want to tokenize text, instruct the processor to call the tokenizer instead with a context manager."),Oa=u(),w(zs.$$.fragment),Ma=u(),rs=o("p"),Lt=e("Use \u{1F917} Datasets "),Us=o("a"),Rt=e("map"),Ot=e(" function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with "),da=o("code"),Mt=e("num_proc"),Wt=e(". Remove the columns you don\u2019t need:"),Wa=u(),w(Fs.$$.fragment),za=u(),J=o("p"),zt=e("\u{1F917} Transformers doesn\u2019t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the "),Qs=o("a"),Ut=e("DataCollatorWithPadding"),Ft=e(" to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ma=o("code"),Yt=e("tokenizer"),Vt=e(" function by setting "),ga=o("code"),Nt=e("padding=True"),Ht=e(", dynamic padding is more efficient."),Ua=u(),is=o("p"),Bt=e("Unlike other data collators, this specific data collator needs to apply a different padding method to "),_a=o("code"),Gt=e("input_values"),Jt=e(" and "),$a=o("code"),Kt=e("labels"),Qt=e(". 
You can apply a different padding method with a context manager:"),Fa=u(),w(Ys.$$.fragment),Ya=u(),$s=o("p"),Xt=e("Create a batch of examples and dynamically pad them with "),ja=o("code"),Zt=e("DataCollatorForCTCWithPadding"),se=e(":"),Va=u(),w(Vs.$$.fragment),Na=u(),fs=o("h2"),js=o("a"),ba=o("span"),w(Ns.$$.fragment),ae=u(),va=o("span"),te=e("Train"),Ha=u(),w(bs.$$.fragment),Ba=u(),w(vs.$$.fragment),this.h()},l(s){const l=Ve('[data-svelte="svelte-1phssyn"]',document.head);c=r(l,"META",{name:!0,content:!0}),l.forEach(t),j=d(s),h=r(s,"H1",{class:!0});var Hs=i(h);$=r(Hs,"A",{id:!0,class:!0,href:!0});var wa=i($);v=r(wa,"SPAN",{});var ya=i(v);y(g.$$.fragment,ya),ya.forEach(t),wa.forEach(t),q=d(Hs),T=r(Hs,"SPAN",{});var ne=i(T);P=n(ne,"Automatic speech recognition"),ne.forEach(t),Hs.forEach(t),_=d(s),y(D.$$.fragment,s),C=d(s),K=r(s,"P",{});var le=i(K);as=n(le,"Automatic speech recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. Voice assistants like Siri and Alexa utilize ASR models to assist users."),le.forEach(t),us=d(s),S=r(s,"P",{});var Xs=i(S);R=n(Xs,"This guide will show you how to fine-tune "),I=r(Xs,"A",{href:!0,rel:!0});var oe=i(I);ys=n(oe,"Wav2Vec2"),oe.forEach(t),ts=n(Xs," on the "),Q=r(Xs,"A",{href:!0,rel:!0});var re=i(Q);ks=n(re,"MInDS-14"),re.forEach(t),xs=n(Xs," dataset to transcribe audio to text."),Xs.forEach(t),Y=d(s),y(V.$$.fragment,s),X=d(s),M=r(s,"H2",{class:!0});var Ja=i(M);N=r(Ja,"A",{id:!0,class:!0,href:!0});var ie=i(N);cs=r(ie,"SPAN",{});var pe=i(cs);y(L.$$.fragment,pe),pe.forEach(t),ie.forEach(t),Es=d(Ja),H=r(Ja,"SPAN",{});var ce=i(H);As=n(ce,"Load MInDS-14 dataset"),ce.forEach(t),Ja.forEach(t),ds=d(s),W=r(s,"P",{});var Ka=i(W);Z=n(Ka,"Load the "),B=r(Ka,"A",{href:!0,rel:!0});var he=i(B);f=n(he,"MInDS-14"),he.forEach(t),b=n(Ka," from the \u{1F917} Datasets library:"),Ka.forEach(t),es=d(s),y(ns.$$.fragment,s),qs=d(s),z=r(s,"P",{});var fe=i(z);ms=n(fe,"Split this dataset into a train and test set:"),fe.forEach(t),Ts=d(s),y(G.$$.fragment,s),Ps=d(s),ss=r(s,"P",{});var ue=i(ss);Bs=n(ue,"Then take a look at the dataset:"),ue.forEach(t),ka=d(s),y(Cs.$$.fragment,s),xa=d(s),O=r(s,"P",{});var ps=i(O);nt=n(ps,"While the dataset contains a lot of helpful information, like "),ea=r(ps,"CODE",{});var de=i(ea);lt=n(de,"lang_id"),de.forEach(t),ot=n(ps," and "),na=r(ps,"CODE",{});var me=i(na);rt=n(me,"intent_class"),me.forEach(t),it=n(ps,", you will focus on the "),la=r(ps,"CODE",{});var ge=i(la);pt=n(ge,"audio"),ge.forEach(t),ct=n(ps," and "),oa=r(ps,"CODE",{});var _e=i(oa);ht=n(_e,"transcription"),_e.forEach(t),ft=n(ps," columns in this guide. 
Remove the other columns:"),ps.forEach(t),Ea=d(s),y(Ds.$$.fragment,s),Aa=d(s),Gs=r(s,"P",{});var $e=i(Gs);ut=n($e,"Take a look at the example again:"),$e.forEach(t),qa=d(s),y(Ss.$$.fragment,s),Ta=d(s),ls=r(s,"P",{});var Zs=i(ls);dt=n(Zs,"The "),ra=r(Zs,"CODE",{});var je=i(ra);mt=n(je,"audio"),je.forEach(t),gt=n(Zs," column contains a 1-dimensional "),ia=r(Zs,"CODE",{});var be=i(ia);_t=n(be,"array"),be.forEach(t),$t=n(Zs," of the speech signal that must be called to load and resample the audio file."),Zs.forEach(t),Pa=d(s),hs=r(s,"H2",{class:!0});var Qa=i(hs);gs=r(Qa,"A",{id:!0,class:!0,href:!0});var ve=i(gs);pa=r(ve,"SPAN",{});var we=i(pa);y(Is.$$.fragment,we),we.forEach(t),ve.forEach(t),jt=d(Qa),ca=r(Qa,"SPAN",{});var ye=i(ca);bt=n(ye,"Preprocess"),ye.forEach(t),Qa.forEach(t),Ca=d(s),Js=r(s,"P",{});var ke=i(Js);vt=n(ke,"Load the Wav2Vec2 processor to process the audio signal and transcribed text:"),ke.forEach(t),Da=d(s),y(Ls.$$.fragment,s),Sa=d(s),_s=r(s,"P",{});var Xa=i(_s);wt=n(Xa,"The "),Rs=r(Xa,"A",{href:!0,rel:!0});var xe=i(Rs);yt=n(xe,"MInDS-14"),xe.forEach(t),kt=n(Xa," dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:"),Xa.forEach(t),Ia=d(s),y(Os.$$.fragment,s),La=d(s),Ks=r(s,"P",{});var Ee=i(Ks);xt=n(Ee,"The preprocessing function needs to:"),Ee.forEach(t),Ra=d(s),os=r(s,"OL",{});var sa=i(os);Ms=r(sa,"LI",{});var Za=i(Ms);Et=n(Za,"Call the "),ha=r(Za,"CODE",{});var Ae=i(ha);At=n(Ae,"audio"),Ae.forEach(t),qt=n(Za," column to load and resample the audio file."),Za.forEach(t),Tt=d(sa),Ws=r(sa,"LI",{});var st=i(Ws);Pt=n(st,"Extract the "),fa=r(st,"CODE",{});var qe=i(fa);Ct=n(qe,"input_values"),qe.forEach(t),Dt=n(st," from the audio file."),st.forEach(t),St=d(sa),ua=r(sa,"LI",{});var Te=i(ua);It=n(Te,"Typically, when you call the processor, you call the feature extractor. Since you also want to tokenize text, instruct the processor to call the tokenizer instead with a context manager."),Te.forEach(t),sa.forEach(t),Oa=d(s),y(zs.$$.fragment,s),Ma=d(s),rs=r(s,"P",{});var aa=i(rs);Lt=n(aa,"Use \u{1F917} Datasets "),Us=r(aa,"A",{href:!0,rel:!0});var Pe=i(Us);Rt=n(Pe,"map"),Pe.forEach(t),Ot=n(aa," function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with "),da=r(aa,"CODE",{});var Ce=i(da);Mt=n(Ce,"num_proc"),Ce.forEach(t),Wt=n(aa,". Remove the columns you don\u2019t need:"),aa.forEach(t),Wa=d(s),y(Fs.$$.fragment,s),za=d(s),J=r(s,"P",{});var ws=i(J);zt=n(ws,"\u{1F917} Transformers doesn\u2019t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the "),Qs=r(ws,"A",{href:!0});var De=i(Qs);Ut=n(De,"DataCollatorWithPadding"),De.forEach(t),Ft=n(ws," to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),ma=r(ws,"CODE",{});var Se=i(ma);Yt=n(Se,"tokenizer"),Se.forEach(t),Vt=n(ws," function by setting "),ga=r(ws,"CODE",{});var Ie=i(ga);Nt=n(Ie,"padding=True"),Ie.forEach(t),Ht=n(ws,", dynamic padding is more efficient."),ws.forEach(t),Ua=d(s),is=r(s,"P",{});var ta=i(is);Bt=n(ta,"Unlike other data collators, this specific data collator needs to apply a different padding method to "),_a=r(ta,"CODE",{});var Le=i(_a);Gt=n(Le,"input_values"),Le.forEach(t),Jt=n(ta," and "),$a=r(ta,"CODE",{});var Re=i($a);Kt=n(Re,"labels"),Re.forEach(t),Qt=n(ta,". You can apply a different padding method with a context manager:"),ta.forEach(t),Fa=d(s),y(Ys.$$.fragment,s),Ya=d(s),$s=r(s,"P",{});var at=i($s);Xt=n(at,"Create a batch of examples and dynamically pad them with "),ja=r(at,"CODE",{});var Oe=i(ja);Zt=n(Oe,"DataCollatorForCTCWithPadding"),Oe.forEach(t),se=n(at,":"),at.forEach(t),Va=d(s),y(Vs.$$.fragment,s),Na=d(s),fs=r(s,"H2",{class:!0});var tt=i(fs);js=r(tt,"A",{id:!0,class:!0,href:!0});var Me=i(js);ba=r(Me,"SPAN",{});var We=i(ba);y(Ns.$$.fragment,We),We.forEach(t),Me.forEach(t),ae=d(tt),va=r(tt,"SPAN",{});var ze=i(va);te=n(ze,"Train"),ze.forEach(t),tt.forEach(t),Ha=d(s),y(bs.$$.fragment,s),Ba=d(s),y(vs.$$.fragment,s),this.h()},h(){m(c,"name","hf:doc:metadata"),m(c,"content",JSON.stringify(an)),m($,"id","automatic-speech-recognition"),m($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m($,"href","#automatic-speech-recognition"),m(h,"class","relative group"),m(I,"href","https://huggingface.co/facebook/wav2vec2-base"),m(I,"rel","nofollow"),m(Q,"href","https://huggingface.co/datasets/PolyAI/minds14"),m(Q,"rel","nofollow"),m(N,"id","load-minds14-dataset"),m(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(N,"href","#load-minds14-dataset"),m(M,"class","relative group"),m(B,"href","https://huggingface.co/datasets/PolyAI/minds14"),m(B,"rel","nofollow"),m(gs,"id","preprocess"),m(gs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(gs,"href","#preprocess"),m(hs,"class","relative group"),m(Rs,"href","https://huggingface.co/datasets/PolyAI/minds14"),m(Rs,"rel","nofollow"),m(Us,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),m(Us,"rel","nofollow"),m(Qs,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),m(js,"id","train"),m(js,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(js,"href","#train"),m(fs,"class","relative 
group")},m(s,l){a(document.head,c),p(s,j,l),p(s,h,l),a(h,$),a($,v),k(g,v,null),a(h,q),a(h,T),a(T,P),p(s,_,l),k(D,s,l),p(s,C,l),p(s,K,l),a(K,as),p(s,us,l),p(s,S,l),a(S,R),a(S,I),a(I,ys),a(S,ts),a(S,Q),a(Q,ks),a(S,xs),p(s,Y,l),k(V,s,l),p(s,X,l),p(s,M,l),a(M,N),a(N,cs),k(L,cs,null),a(M,Es),a(M,H),a(H,As),p(s,ds,l),p(s,W,l),a(W,Z),a(W,B),a(B,f),a(W,b),p(s,es,l),k(ns,s,l),p(s,qs,l),p(s,z,l),a(z,ms),p(s,Ts,l),k(G,s,l),p(s,Ps,l),p(s,ss,l),a(ss,Bs),p(s,ka,l),k(Cs,s,l),p(s,xa,l),p(s,O,l),a(O,nt),a(O,ea),a(ea,lt),a(O,ot),a(O,na),a(na,rt),a(O,it),a(O,la),a(la,pt),a(O,ct),a(O,oa),a(oa,ht),a(O,ft),p(s,Ea,l),k(Ds,s,l),p(s,Aa,l),p(s,Gs,l),a(Gs,ut),p(s,qa,l),k(Ss,s,l),p(s,Ta,l),p(s,ls,l),a(ls,dt),a(ls,ra),a(ra,mt),a(ls,gt),a(ls,ia),a(ia,_t),a(ls,$t),p(s,Pa,l),p(s,hs,l),a(hs,gs),a(gs,pa),k(Is,pa,null),a(hs,jt),a(hs,ca),a(ca,bt),p(s,Ca,l),p(s,Js,l),a(Js,vt),p(s,Da,l),k(Ls,s,l),p(s,Sa,l),p(s,_s,l),a(_s,wt),a(_s,Rs),a(Rs,yt),a(_s,kt),p(s,Ia,l),k(Os,s,l),p(s,La,l),p(s,Ks,l),a(Ks,xt),p(s,Ra,l),p(s,os,l),a(os,Ms),a(Ms,Et),a(Ms,ha),a(ha,At),a(Ms,qt),a(os,Tt),a(os,Ws),a(Ws,Pt),a(Ws,fa),a(fa,Ct),a(Ws,Dt),a(os,St),a(os,ua),a(ua,It),p(s,Oa,l),k(zs,s,l),p(s,Ma,l),p(s,rs,l),a(rs,Lt),a(rs,Us),a(Us,Rt),a(rs,Ot),a(rs,da),a(da,Mt),a(rs,Wt),p(s,Wa,l),k(Fs,s,l),p(s,za,l),p(s,J,l),a(J,zt),a(J,Qs),a(Qs,Ut),a(J,Ft),a(J,ma),a(ma,Yt),a(J,Vt),a(J,ga),a(ga,Nt),a(J,Ht),p(s,Ua,l),p(s,is,l),a(is,Bt),a(is,_a),a(_a,Gt),a(is,Jt),a(is,$a),a($a,Kt),a(is,Qt),p(s,Fa,l),k(Ys,s,l),p(s,Ya,l),p(s,$s,l),a($s,Xt),a($s,ja),a(ja,Zt),a($s,se),p(s,Va,l),k(Vs,s,l),p(s,Na,l),p(s,fs,l),a(fs,js),a(js,ba),k(Ns,ba,null),a(fs,ae),a(fs,va),a(va,te),p(s,Ha,l),k(bs,s,l),p(s,Ba,l),k(vs,s,l),Ga=!0},p(s,[l]){const Hs={};l&2&&(Hs.$$scope={dirty:l,ctx:s}),V.$set(Hs);const wa={};l&2&&(wa.$$scope={dirty:l,ctx:s}),bs.$set(wa);const ya={};l&2&&(ya.$$scope={dirty:l,ctx:s}),vs.$set(ya)},i(s){Ga||(x(g.$$.fragment,s),x(D.$$.fragment,s),x(V.$$.fragment,s),x(L.$$.fragment,s),x(ns.$$.fragment,s),x(G.$$.fragment,s),x(Cs.$$.fragment,s),x(Ds.$$.fragment,s),x(Ss.$$.fragment,s),x(Is.$$.fragment,s),x(Ls.$$.fragment,s),x(Os.$$.fragment,s),x(zs.$$.fragment,s),x(Fs.$$.fragment,s),x(Ys.$$.fragment,s),x(Vs.$$.fragment,s),x(Ns.$$.fragment,s),x(bs.$$.fragment,s),x(vs.$$.fragment,s),Ga=!0)},o(s){E(g.$$.fragment,s),E(D.$$.fragment,s),E(V.$$.fragment,s),E(L.$$.fragment,s),E(ns.$$.fragment,s),E(G.$$.fragment,s),E(Cs.$$.fragment,s),E(Ds.$$.fragment,s),E(Ss.$$.fragment,s),E(Is.$$.fragment,s),E(Ls.$$.fragment,s),E(Os.$$.fragment,s),E(zs.$$.fragment,s),E(Fs.$$.fragment,s),E(Ys.$$.fragment,s),E(Vs.$$.fragment,s),E(Ns.$$.fragment,s),E(bs.$$.fragment,s),E(vs.$$.fragment,s),Ga=!1},d(s){t(c),s&&t(j),s&&t(h),A(g),s&&t(_),A(D,s),s&&t(C),s&&t(K),s&&t(us),s&&t(S),s&&t(Y),A(V,s),s&&t(X),s&&t(M),A(L),s&&t(ds),s&&t(W),s&&t(es),A(ns,s),s&&t(qs),s&&t(z),s&&t(Ts),A(G,s),s&&t(Ps),s&&t(ss),s&&t(ka),A(Cs,s),s&&t(xa),s&&t(O),s&&t(Ea),A(Ds,s),s&&t(Aa),s&&t(Gs),s&&t(qa),A(Ss,s),s&&t(Ta),s&&t(ls),s&&t(Pa),s&&t(hs),A(Is),s&&t(Ca),s&&t(Js),s&&t(Da),A(Ls,s),s&&t(Sa),s&&t(_s),s&&t(Ia),A(Os,s),s&&t(La),s&&t(Ks),s&&t(Ra),s&&t(os),s&&t(Oa),A(zs,s),s&&t(Ma),s&&t(rs),s&&t(Wa),A(Fs,s),s&&t(za),s&&t(J),s&&t(Ua),s&&t(is),s&&t(Fa),A(Ys,s),s&&t(Ya),s&&t($s),s&&t(Va),A(Vs,s),s&&t(Na),s&&t(fs),A(Ns),s&&t(Ha),A(bs,s),s&&t(Ba),A(vs,s)}}}const an={local:"automatic-speech-recognition",sections:[{local:"load-minds14-dataset",title:"Load MInDS-14 dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Automatic speech recognition"};function tn(F){return Ne(()=>{new 
URLSearchParams(window.location.search).get("fw")}),[]}class cn extends Ue{constructor(c){super();Fe(this,c,tn,sn,Ye,{})}}export{cn as default,an as metadata};
26
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/semantic_segmentation.mdx-hf-doc-builder.js
import{S as fr,i as ur,s as dr,e as o,k as m,w as u,t as l,M as gr,c as i,d as s,m as c,a as p,x as d,h as n,b as h,N as cr,G as a,g as r,y as g,q as _,o as b,B as v,v as _r}from"../../chunks/vendor-hf-doc-builder.js";import{T as hr}from"../../chunks/Tip-hf-doc-builder.js";import{Y as br}from"../../chunks/Youtube-hf-doc-builder.js";import{I as Js}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as y}from"../../chunks/CodeBlock-hf-doc-builder.js";import{D as vr}from"../../chunks/DocNotebookDropdown-hf-doc-builder.js";function jr(Je){let f,S,j,w,T;return{c(){f=o("p"),S=l("See the image segmentation "),j=o("a"),w=l("task page"),T=l(" for more information about its associated models, datasets, and metrics."),this.h()},l($){f=i($,"P",{});var P=p(f);S=n(P,"See the image segmentation "),j=i(P,"A",{href:!0,rel:!0});var A=p(j);w=n(A,"task page"),A.forEach(s),T=n(P," for more information about its associated models, datasets, and metrics."),P.forEach(s),this.h()},h(){h(j,"href","https://huggingface.co/tasks/image-segmentation"),h(j,"rel","nofollow")},m($,P){r($,f,P),a(f,S),a(f,j),a(j,w),a(f,T)},d($){$&&s(f)}}}function $r(Je){let f,S,j,w,T,$,P,A;return{c(){f=o("p"),S=l("If you aren\u2019t familiar with finetuning a model with the "),j=o("a"),w=l("Trainer"),T=l(", take a look at the basic tutorial "),$=o("a"),P=l("here"),A=l("!"),this.h()},l(G){f=i(G,"P",{});var q=p(f);S=n(q,"If you aren\u2019t familiar with finetuning a model with the "),j=i(q,"A",{href:!0});var I=p(j);w=n(I,"Trainer"),I.forEach(s),T=n(q,", take a look at the basic tutorial "),$=i(q,"A",{href:!0});var oe=p($);P=n(oe,"here"),oe.forEach(s),A=n(q,"!"),q.forEach(s),this.h()},h(){h(j,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h($,"href","../training#finetune-with-trainer")},m(G,q){r(G,f,q),a(f,S),a(f,j),a(j,w),a(f,T),a(f,$),a($,P),a(f,A)},d(G){G&&s(f)}}}function yr(Je){let f,S,j,w,T,$,P,A,G,q,I,oe,ie,Rs,Re,nt,Ys,z,rt,pe,ot,it,me,pt,mt,Vs,Y,Ws,Ye,ct,Ks,ce,Qs,B,V,fs,he,ht,us,ft,Xs,Ve,ut,Zs,fe,ea,We,dt,sa,ue,aa,Ke,gt,ta,de,la,k,_t,ds,bt,vt,gs,jt,$t,_s,yt,wt,bs,kt,Et,vs,xt,Pt,na,O,Tt,js,qt,St,$s,At,Ft,ra,ge,oa,H,W,ys,_e,Ct,ws,Dt,ia,L,It,ks,zt,Ot,Es,Lt,Nt,pa,be,ma,N,Mt,ve,xs,Ut,Gt,je,Bt,Ht,ca,$e,ha,E,Jt,Ps,Rt,Yt,Ts,Vt,Wt,qs,Kt,Qt,Ss,Xt,Zt,As,el,sl,fa,ye,ua,M,al,Fs,tl,ll,we,nl,rl,da,ke,ga,J,K,Cs,Ee,ol,Ds,il,_a,Q,pl,Qe,ml,cl,ba,xe,va,X,ja,x,hl,Xe,fl,ul,Is,dl,gl,zs,_l,bl,Os,vl,jl,Ls,$l,yl,$a,Z,wl,Ns,kl,El,ya,Pe,wa,ee,xl,Te,Pl,Tl,ka,Ze,ql,Ea,qe,xa,U,Sl,Se,Al,Fl,Ae,Cl,Dl,Pa,Fe,Ta,se,Il,es,zl,Ol,qa,Ce,Sa,ae,Ll,ss,Nl,Ml,Aa,De,Fa,R,te,Ms,Ie,Ul,Us,Gl,Ca,as,Bl,Da,ts,Hl,Ia,ze,za,Oe,ls,sn,Oa,le,Jl,Gs,Rl,Yl,La,Le,Na,ne,Vl,Bs,Wl,Kl,Ma,Ne,Ua,ns,Ql,Ga,Me,Ba,re,Xl,Ue,Zl,en,Ha,Ge,Ja,Be,rs,an,Ra;return $=new Js({}),I=new vr({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/semantic_segmentation.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/pytorch/semantic_segmentation.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/tensorflow/semantic_segmentation.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/semantic_segmentation.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/pytorch/semantic_segmentation.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/tensorflow/semantic_segmentation.ipynb"}]}}),ie=new br({props:{id:"dKE8SIt9C-w"}}),Y=new hr({props:{$$slots:{default:[jr]},$$scope:{ctx:Je}}}),ce=new y({props:{code:"pip install -q datasets transformers evaluate",highlighted:"pip install -q datasets transformers evaluate"}}),he=new Js({}),fe=new y({props:{code:`from datasets import load_dataset ds = load_dataset("scene_parse_150", split="train[:50]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;scene_parse_150&quot;</span>, split=<span class="hljs-string">&quot;train[:50]&quot;</span>)`}}),ue=new y({props:{code:`ds = ds.train_test_split(test_size=0.2) train_ds = ds["train"] test_ds = ds["test"]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>ds = ds.train_test_split(test_size=<span class="hljs-number">0.2</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>train_ds = ds[<span class="hljs-string">&quot;train&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>test_ds = ds[<span class="hljs-string">&quot;test&quot;</span>]`}}),de=new y({props:{code:"train_ds[0]",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>train_ds[<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at <span class="hljs-number">0x7F9B0C201F90</span>&gt;, <span class="hljs-string">&#x27;annotation&#x27;</span>: &lt;PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at <span class="hljs-number">0x7F9B0C201DD0</span>&gt;, <span class="hljs-string">&#x27;scene_category&#x27;</span>: <span class="hljs-number">368</span>}`}}),ge=new y({props:{code:`import json from huggingface_hub import cached_download, hf_hub_url repo_id = "huggingface/label-files" filename = "ade20k-hf-doc-builder.json" id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> json <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> cached_download, hf_hub_url <span class="hljs-meta">&gt;&gt;&gt; </span>repo_id = <span class="hljs-string">&quot;huggingface/label-files&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>filename = <span class="hljs-string">&quot;ade20k-hf-doc-builder.json&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>id2label = json.load(<span class="hljs-built_in">open</span>(cached_download(hf_hub_url(repo_id, filename, repo_type=<span class="hljs-string">&quot;dataset&quot;</span>)), <span class="hljs-string">&quot;r&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>id2label = {<span class="hljs-built_in">int</span>(k): v <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> id2label.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>label2id = {v: k <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> id2label.items()} <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(id2label)`}}),_e=new Js({}),be=new y({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("nvidia/mit-b0", reduce_labels=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>, reduce_labels=<span class="hljs-literal">True</span>)`}}),$e=new y({props:{code:`from torchvision.transforms import ColorJitter jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> ColorJitter <span class="hljs-meta">&gt;&gt;&gt; </span>jitter = ColorJitter(brightness=<span class="hljs-number">0.25</span>, contrast=<span class="hljs-number">0.25</span>, saturation=<span class="hljs-number">0.25</span>, hue=<span class="hljs-number">0.1</span>)`}}),ye=new y({props:{code:`def train_transforms(example_batch): images = [jitter(x) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = feature_extractor(images, labels) return inputs def val_transforms(example_batch): images = [x for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = feature_extractor(images, labels) return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">train_transforms</span>(<span class="hljs-params">example_batch</span>): <span class="hljs-meta">... </span> images = [jitter(x) <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> example_batch[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> labels = [x <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> example_batch[<span class="hljs-string">&quot;annotation&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor(images, labels) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">val_transforms</span>(<span class="hljs-params">example_batch</span>): <span class="hljs-meta">... </span> images = [x <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> example_batch[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... 
</span> labels = [x <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> example_batch[<span class="hljs-string">&quot;annotation&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor(images, labels) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs`}}),ke=new y({props:{code:`train_ds.set_transform(train_transforms) test_ds.set_transform(val_transforms)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>train_ds.set_transform(train_transforms) <span class="hljs-meta">&gt;&gt;&gt; </span>test_ds.set_transform(val_transforms)`}}),Ee=new Js({}),xe=new y({props:{code:`from transformers import AutoModelForSemanticSegmentation pretrained_model_name = "nvidia/mit-b0" model = AutoModelForSemanticSegmentation.from_pretrained( pretrained_model_name, id2label=id2label, label2id=label2id )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSemanticSegmentation <span class="hljs-meta">&gt;&gt;&gt; </span>pretrained_model_name = <span class="hljs-string">&quot;nvidia/mit-b0&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSemanticSegmentation.from_pretrained( <span class="hljs-meta">... </span> pretrained_model_name, id2label=id2label, label2id=label2id <span class="hljs-meta">... </span>)`}}),X=new hr({props:{$$slots:{default:[$r]},$$scope:{ctx:Je}}}),Pe=new y({props:{code:`from transformers import TrainingArguments training_args = TrainingArguments( output_dir="segformer-b0-scene-parse-150", learning_rate=6e-5, num_train_epochs=50, per_device_train_batch_size=2, per_device_eval_batch_size=2, save_total_limit=3, evaluation_strategy="steps", save_strategy="steps", save_steps=20, eval_steps=20, logging_steps=1, eval_accumulation_steps=5, remove_unused_columns=False, push_to_hub=True, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;segformer-b0-scene-parse-150&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">6e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">50</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> save_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> save_steps=<span class="hljs-number">20</span>, <span class="hljs-meta">... </span> eval_steps=<span class="hljs-number">20</span>, <span class="hljs-meta">... </span> logging_steps=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> eval_accumulation_steps=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> remove_unused_columns=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> push_to_hub=<span class="hljs-literal">True</span>, <span class="hljs-meta">... 
</span>)`}}),qe=new y({props:{code:`import evaluate metric = evaluate.load("mean_iou")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> evaluate <span class="hljs-meta">&gt;&gt;&gt; </span>metric = evaluate.load(<span class="hljs-string">&quot;mean_iou&quot;</span>)`}}),Fe=new y({props:{code:`def compute_metrics(eval_pred): with torch.no_grad(): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=num_labels, ignore_index=255, reduce_labels=False, ) for key, value in metrics.items(): if type(value) is np.ndarray: metrics[key] = value.tolist() return metrics`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_metrics</span>(<span class="hljs-params">eval_pred</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> logits, labels = eval_pred <span class="hljs-meta">... </span> logits_tensor = torch.from_numpy(logits) <span class="hljs-meta">... </span> logits_tensor = nn.functional.interpolate( <span class="hljs-meta">... </span> logits_tensor, <span class="hljs-meta">... </span> size=labels.shape[-<span class="hljs-number">2</span>:], <span class="hljs-meta">... </span> mode=<span class="hljs-string">&quot;bilinear&quot;</span>, <span class="hljs-meta">... </span> align_corners=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> ).argmax(dim=<span class="hljs-number">1</span>) <span class="hljs-meta">... </span> pred_labels = logits_tensor.detach().cpu().numpy() <span class="hljs-meta">... </span> metrics = metric.compute( <span class="hljs-meta">... </span> predictions=pred_labels, <span class="hljs-meta">... </span> references=labels, <span class="hljs-meta">... </span> num_labels=num_labels, <span class="hljs-meta">... </span> ignore_index=<span class="hljs-number">255</span>, <span class="hljs-meta">... </span> reduce_labels=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> key, value <span class="hljs-keyword">in</span> metrics.items(): <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> <span class="hljs-built_in">type</span>(value) <span class="hljs-keyword">is</span> np.ndarray: <span class="hljs-meta">... </span> metrics[key] = value.tolist() <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> metrics`}}),Ce=new y({props:{code:`from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=train_ds, <span class="hljs-meta">... </span> eval_dataset=test_ds, <span class="hljs-meta">... 
</span> compute_metrics=compute_metrics, <span class="hljs-meta">... </span>)`}}),De=new y({props:{code:"trainer.train()",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()'}}),Ie=new Js({}),ze=new y({props:{code:`image = ds[0]["image"] image`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>image = ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;image&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span>image`}}),Le=new y({props:{code:`device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use GPU if available, otherwise use a CPU encoding = feature_extractor(image, return_tensors="pt") pixel_values = encoding.pixel_values.to(device)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span> <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-comment"># use GPU if available, otherwise use a CPU</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoding = feature_extractor(image, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pixel_values = encoding.pixel_values.to(device)`}}),Ne=new y({props:{code:`outputs = model(pixel_values=pixel_values) logits = outputs.logits.cpu()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(pixel_values=pixel_values) <span class="hljs-meta">&gt;&gt;&gt; </span>logits = outputs.logits.cpu()`}}),Me=new y({props:{code:`upsampled_logits = nn.functional.interpolate( logits, size=image.size[::-1], mode="bilinear", align_corners=False, ) pred_seg = upsampled_logits.argmax(dim=1)[0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>upsampled_logits = nn.functional.interpolate( <span class="hljs-meta">... </span> logits, <span class="hljs-meta">... </span> size=image.size[::-<span class="hljs-number">1</span>], <span class="hljs-meta">... </span> mode=<span class="hljs-string">&quot;bilinear&quot;</span>, <span class="hljs-meta">... </span> align_corners=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pred_seg = upsampled_logits.argmax(dim=<span class="hljs-number">1</span>)[<span class="hljs-number">0</span>]`}}),Ge=new y({props:{code:`import matplotlib.pyplot as plt color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) palette = np.array(ade_palette()) for label, color in enumerate(palette): color_seg[pred_seg == label, :] = color color_seg = color_seg[..., ::-1] # convert to BGR img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map img = img.astype(np.uint8) plt.figure(figsize=(15, 10)) plt.imshow(img) plt.show()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> matplotlib.pyplot <span class="hljs-keyword">as</span> plt <span class="hljs-meta">&gt;&gt;&gt; </span>color_seg = np.zeros((pred_seg.shape[<span class="hljs-number">0</span>], pred_seg.shape[<span class="hljs-number">1</span>], <span class="hljs-number">3</span>), dtype=np.uint8) <span class="hljs-meta">&gt;&gt;&gt; </span>palette = np.array(ade_palette()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> label, color <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(palette): <span class="hljs-meta">... 
</span> color_seg[pred_seg == label, :] = color <span class="hljs-meta">&gt;&gt;&gt; </span>color_seg = color_seg[..., ::-<span class="hljs-number">1</span>] <span class="hljs-comment"># convert to BGR</span> <span class="hljs-meta">&gt;&gt;&gt; </span>img = np.array(image) * <span class="hljs-number">0.5</span> + color_seg * <span class="hljs-number">0.5</span> <span class="hljs-comment"># plot the image with the segmentation map</span> <span class="hljs-meta">&gt;&gt;&gt; </span>img = img.astype(np.uint8) <span class="hljs-meta">&gt;&gt;&gt; </span>plt.figure(figsize=(<span class="hljs-number">15</span>, <span class="hljs-number">10</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>plt.imshow(img) <span class="hljs-meta">&gt;&gt;&gt; </span>plt.show()`}}),{c(){f=o("meta"),S=m(),j=o("h1"),w=o("a"),T=o("span"),u($.$$.fragment),P=m(),A=o("span"),G=l("Semantic segmentation"),q=m(),u(I.$$.fragment),oe=m(),u(ie.$$.fragment),Rs=m(),Re=o("p"),nt=l("Semantic segmentation assigns a label or class to each individual pixel of an image. There are several types of segmentation, and in the case of semantic segmentation, no distinction is made between unique instances of the same object. Both objects are given the same label (for example, \u201Ccar\u201D instead of \u201Ccar-1\u201D and \u201Ccar-2\u201D). Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery."),Ys=m(),z=o("p"),rt=l("This guide will show you how to finetune "),pe=o("a"),ot=l("SegFormer"),it=l(" on the "),me=o("a"),pt=l("SceneParse150"),mt=l(" dataset."),Vs=m(),u(Y.$$.fragment),Ws=m(),Ye=o("p"),ct=l("Before you begin, make sure you have all the necessary libraries installed:"),Ks=m(),u(ce.$$.fragment),Qs=m(),B=o("h2"),V=o("a"),fs=o("span"),u(he.$$.fragment),ht=m(),us=o("span"),ft=l("Load SceneParse150 dataset"),Xs=m(),Ve=o("p"),ut=l("Load the first 50 examples of the SceneParse150 dataset from the \u{1F917} Datasets library so you can quickly train and test a model:"),Zs=m(),u(fe.$$.fragment),ea=m(),We=o("p"),dt=l("Split this dataset into a train and test set:"),sa=m(),u(ue.$$.fragment),aa=m(),Ke=o("p"),gt=l("Then take a look at an example:"),ta=m(),u(de.$$.fragment),la=m(),k=o("p"),_t=l("There is an "),ds=o("code"),bt=l("image"),vt=l(", an "),gs=o("code"),jt=l("annotation"),$t=l(" (this is the segmentation map or label), and a "),_s=o("code"),yt=l("scene_category"),wt=l(" field that describes the image scene, like \u201Ckitchen\u201D or \u201Coffice\u201D. In this guide, you\u2019ll only need "),bs=o("code"),kt=l("image"),Et=l(" and "),vs=o("code"),xt=l("annotation"),Pt=l(", both of which are PIL images."),na=m(),O=o("p"),Tt=l("You\u2019ll also want to create a dictionary that maps a label id to a label class which will be useful when you set up the model later. Download the mappings from the Hub and create the "),js=o("code"),qt=l("id2label"),St=l(" and "),$s=o("code"),At=l("label2id"),Ft=l(" dictionaries:"),ra=m(),u(ge.$$.fragment),oa=m(),H=o("h2"),W=o("a"),ys=o("span"),u(_e.$$.fragment),Ct=m(),ws=o("span"),Dt=l("Preprocess"),ia=m(),L=o("p"),It=l("Next, load a SegFormer feature extractor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. 
However, the background class isn\u2019t included in the 150 classes, so you\u2019ll need to set "),ks=o("code"),zt=l("reduce_labels=True"),Ot=l(" to subtract one from all the labels. The zero-index is replaced by "),Es=o("code"),Lt=l("255"),Nt=l(" so it\u2019s ignored by SegFormer\u2019s loss function:"),pa=m(),u(be.$$.fragment),ma=m(),N=o("p"),Mt=l("It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. In this guide, you\u2019ll use the "),ve=o("a"),xs=o("code"),Ut=l("ColorJitter"),Gt=l(" function from "),je=o("a"),Bt=l("torchvision"),Ht=l(" to randomly change the color properties of an image:"),ca=m(),u($e.$$.fragment),ha=m(),E=o("p"),Jt=l("Now create two preprocessing functions to prepare the images and annotations for the model. These functions convert the images into "),Ps=o("code"),Rt=l("pixel_values"),Yt=l(" and annotations to "),Ts=o("code"),Vt=l("labels"),Wt=l(". For the training set, "),qs=o("code"),Kt=l("jitter"),Qt=l(" is applied before providing the images to the feature extractor. For the test set, the feature extractor crops and normalizes the "),Ss=o("code"),Xt=l("images"),Zt=l(", and only crops the "),As=o("code"),el=l("labels"),sl=l(" because no data augmentation is applied during testing."),fa=m(),u(ye.$$.fragment),ua=m(),M=o("p"),al=l("To apply the "),Fs=o("code"),tl=l("jitter"),ll=l(" over the entire dataset, use the \u{1F917} Datasets "),we=o("a"),nl=l("set_transform"),rl=l(" function. The transform is applied on the fly which is faster and consumes less disk space:"),da=m(),u(ke.$$.fragment),ga=m(),J=o("h2"),K=o("a"),Cs=o("span"),u(Ee.$$.fragment),ol=m(),Ds=o("span"),il=l("Train"),_a=m(),Q=o("p"),pl=l("Load SegFormer with "),Qe=o("a"),ml=l("AutoModelForSemanticSegmentation"),cl=l(", and pass the model the mapping between label ids and label classes:"),ba=m(),u(xe.$$.fragment),va=m(),u(X.$$.fragment),ja=m(),x=o("p"),hl=l("Define your training hyperparameters in "),Xe=o("a"),fl=l("TrainingArguments"),ul=l(". It is important not to remove unused columns because this will drop the "),Is=o("code"),dl=l("image"),gl=l(" column. Without the "),zs=o("code"),_l=l("image"),bl=l(" column, you can\u2019t create "),Os=o("code"),vl=l("pixel_values"),jl=l(". Set "),Ls=o("code"),$l=l("remove_unused_columns=False"),yl=l(" to prevent this behavior!"),$a=m(),Z=o("p"),wl=l("To save and push a model under your namespace to the Hub, set "),Ns=o("code"),kl=l("push_to_hub=True"),El=l(":"),ya=m(),u(Pe.$$.fragment),wa=m(),ee=o("p"),xl=l("To evaluate model performance during training, you\u2019ll need to create a function to compute and report metrics. For semantic segmentation, you\u2019ll typically compute the "),Te=o("a"),Pl=l("mean Intersection over Union"),Tl=l(" (IoU). The mean IoU measures the overlapping area between the predicted and ground truth segmentation maps."),ka=m(),Ze=o("p"),ql=l("Load the mean IoU from the \u{1F917} Evaluate library:"),Ea=m(),u(qe.$$.fragment),xa=m(),U=o("p"),Sl=l("Then create a function to "),Se=o("a"),Al=l("compute"),Fl=l(" the metrics. 
Your predictions need to be converted to logits first, and then reshaped to match the size of the labels before you can call "),Ae=o("a"),Cl=l("compute"),Dl=l(":"),Pa=m(),u(Fe.$$.fragment),Ta=m(),se=o("p"),Il=l("Pass your model, training arguments, datasets, and metrics function to the "),es=o("a"),zl=l("Trainer"),Ol=l(":"),qa=m(),u(Ce.$$.fragment),Sa=m(),ae=o("p"),Ll=l("Lastly, call "),ss=o("a"),Nl=l("train()"),Ml=l(" to finetune your model:"),Aa=m(),u(De.$$.fragment),Fa=m(),R=o("h2"),te=o("a"),Ms=o("span"),u(Ie.$$.fragment),Ul=m(),Us=o("span"),Gl=l("Inference"),Ca=m(),as=o("p"),Bl=l("Great, now that you\u2019ve finetuned a model, you can use it for inference!"),Da=m(),ts=o("p"),Hl=l("Load an image for inference:"),Ia=m(),u(ze.$$.fragment),za=m(),Oe=o("div"),ls=o("img"),Oa=m(),le=o("p"),Jl=l("Process the image with a feature extractor and place the "),Gs=o("code"),Rl=l("pixel_values"),Yl=l(" on a GPU:"),La=m(),u(Le.$$.fragment),Na=m(),ne=o("p"),Vl=l("Pass your input to the model and return the "),Bs=o("code"),Wl=l("logits"),Kl=l(":"),Ma=m(),u(Ne.$$.fragment),Ua=m(),ns=o("p"),Ql=l("Next, rescale the logits to the original image size:"),Ga=m(),u(Me.$$.fragment),Ba=m(),re=o("p"),Xl=l("To visualize the results, load the "),Ue=o("a"),Zl=l("dataset color palette"),en=l(" that maps each class to their RGB values. Then you can combine and plot your image and the predicted segmentation map:"),Ha=m(),u(Ge.$$.fragment),Ja=m(),Be=o("div"),rs=o("img"),this.h()},l(e){const t=gr('[data-svelte="svelte-1phssyn"]',document.head);f=i(t,"META",{name:!0,content:!0}),t.forEach(s),S=c(e),j=i(e,"H1",{class:!0});var He=p(j);w=i(He,"A",{id:!0,class:!0,href:!0});var Hs=p(w);T=i(Hs,"SPAN",{});var tn=p(T);d($.$$.fragment,tn),tn.forEach(s),Hs.forEach(s),P=c(He),A=i(He,"SPAN",{});var ln=p(A);G=n(ln,"Semantic segmentation"),ln.forEach(s),He.forEach(s),q=c(e),d(I.$$.fragment,e),oe=c(e),d(ie.$$.fragment,e),Rs=c(e),Re=i(e,"P",{});var nn=p(Re);nt=n(nn,"Semantic segmentation assigns a label or class to each individual pixel of an image. There are several types of segmentation, and in the case of semantic segmentation, no distinction is made between unique instances of the same object. Both objects are given the same label (for example, \u201Ccar\u201D instead of \u201Ccar-1\u201D and \u201Ccar-2\u201D). 
Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery."),nn.forEach(s),Ys=c(e),z=i(e,"P",{});var os=p(z);rt=n(os,"This guide will show you how to finetune "),pe=i(os,"A",{href:!0,rel:!0});var rn=p(pe);ot=n(rn,"SegFormer"),rn.forEach(s),it=n(os," on the "),me=i(os,"A",{href:!0,rel:!0});var on=p(me);pt=n(on,"SceneParse150"),on.forEach(s),mt=n(os," dataset."),os.forEach(s),Vs=c(e),d(Y.$$.fragment,e),Ws=c(e),Ye=i(e,"P",{});var pn=p(Ye);ct=n(pn,"Before you begin, make sure you have all the necessary libraries installed:"),pn.forEach(s),Ks=c(e),d(ce.$$.fragment,e),Qs=c(e),B=i(e,"H2",{class:!0});var Ya=p(B);V=i(Ya,"A",{id:!0,class:!0,href:!0});var mn=p(V);fs=i(mn,"SPAN",{});var cn=p(fs);d(he.$$.fragment,cn),cn.forEach(s),mn.forEach(s),ht=c(Ya),us=i(Ya,"SPAN",{});var hn=p(us);ft=n(hn,"Load SceneParse150 dataset"),hn.forEach(s),Ya.forEach(s),Xs=c(e),Ve=i(e,"P",{});var fn=p(Ve);ut=n(fn,"Load the first 50 examples of the SceneParse150 dataset from the \u{1F917} Datasets library so you can quickly train and test a model:"),fn.forEach(s),Zs=c(e),d(fe.$$.fragment,e),ea=c(e),We=i(e,"P",{});var un=p(We);dt=n(un,"Split this dataset into a train and test set:"),un.forEach(s),sa=c(e),d(ue.$$.fragment,e),aa=c(e),Ke=i(e,"P",{});var dn=p(Ke);gt=n(dn,"Then take a look at an example:"),dn.forEach(s),ta=c(e),d(de.$$.fragment,e),la=c(e),k=i(e,"P",{});var F=p(k);_t=n(F,"There is an "),ds=i(F,"CODE",{});var gn=p(ds);bt=n(gn,"image"),gn.forEach(s),vt=n(F,", an "),gs=i(F,"CODE",{});var _n=p(gs);jt=n(_n,"annotation"),_n.forEach(s),$t=n(F," (this is the segmentation map or label), and a "),_s=i(F,"CODE",{});var bn=p(_s);yt=n(bn,"scene_category"),bn.forEach(s),wt=n(F," field that describes the image scene, like \u201Ckitchen\u201D or \u201Coffice\u201D. In this guide, you\u2019ll only need "),bs=i(F,"CODE",{});var vn=p(bs);kt=n(vn,"image"),vn.forEach(s),Et=n(F," and "),vs=i(F,"CODE",{});var jn=p(vs);xt=n(jn,"annotation"),jn.forEach(s),Pt=n(F,", both of which are PIL images."),F.forEach(s),na=c(e),O=i(e,"P",{});var is=p(O);Tt=n(is,"You\u2019ll also want to create a dictionary that maps a label id to a label class which will be useful when you set up the model later. Download the mappings from the Hub and create the "),js=i(is,"CODE",{});var $n=p(js);qt=n($n,"id2label"),$n.forEach(s),St=n(is," and "),$s=i(is,"CODE",{});var yn=p($s);At=n(yn,"label2id"),yn.forEach(s),Ft=n(is," dictionaries:"),is.forEach(s),ra=c(e),d(ge.$$.fragment,e),oa=c(e),H=i(e,"H2",{class:!0});var Va=p(H);W=i(Va,"A",{id:!0,class:!0,href:!0});var wn=p(W);ys=i(wn,"SPAN",{});var kn=p(ys);d(_e.$$.fragment,kn),kn.forEach(s),wn.forEach(s),Ct=c(Va),ws=i(Va,"SPAN",{});var En=p(ws);Dt=n(En,"Preprocess"),En.forEach(s),Va.forEach(s),ia=c(e),L=i(e,"P",{});var ps=p(L);It=n(ps,"Next, load a SegFormer feature extractor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. However, the background class isn\u2019t included in the 150 classes, so you\u2019ll need to set "),ks=i(ps,"CODE",{});var xn=p(ks);zt=n(xn,"reduce_labels=True"),xn.forEach(s),Ot=n(ps," to subtract one from all the labels. 
The zero-index is replaced by "),Es=i(ps,"CODE",{});var Pn=p(Es);Lt=n(Pn,"255"),Pn.forEach(s),Nt=n(ps," so it\u2019s ignored by SegFormer\u2019s loss function:"),ps.forEach(s),pa=c(e),d(be.$$.fragment,e),ma=c(e),N=i(e,"P",{});var ms=p(N);Mt=n(ms,"It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. In this guide, you\u2019ll use the "),ve=i(ms,"A",{href:!0,rel:!0});var Tn=p(ve);xs=i(Tn,"CODE",{});var qn=p(xs);Ut=n(qn,"ColorJitter"),qn.forEach(s),Tn.forEach(s),Gt=n(ms," function from "),je=i(ms,"A",{href:!0,rel:!0});var Sn=p(je);Bt=n(Sn,"torchvision"),Sn.forEach(s),Ht=n(ms," to randomly change the color properties of an image:"),ms.forEach(s),ca=c(e),d($e.$$.fragment,e),ha=c(e),E=i(e,"P",{});var C=p(E);Jt=n(C,"Now create two preprocessing functions to prepare the images and annotations for the model. These functions convert the images into "),Ps=i(C,"CODE",{});var An=p(Ps);Rt=n(An,"pixel_values"),An.forEach(s),Yt=n(C," and annotations to "),Ts=i(C,"CODE",{});var Fn=p(Ts);Vt=n(Fn,"labels"),Fn.forEach(s),Wt=n(C,". For the training set, "),qs=i(C,"CODE",{});var Cn=p(qs);Kt=n(Cn,"jitter"),Cn.forEach(s),Qt=n(C," is applied before providing the images to the feature extractor. For the test set, the feature extractor crops and normalizes the "),Ss=i(C,"CODE",{});var Dn=p(Ss);Xt=n(Dn,"images"),Dn.forEach(s),Zt=n(C,", and only crops the "),As=i(C,"CODE",{});var In=p(As);el=n(In,"labels"),In.forEach(s),sl=n(C," because no data augmentation is applied during testing."),C.forEach(s),fa=c(e),d(ye.$$.fragment,e),ua=c(e),M=i(e,"P",{});var cs=p(M);al=n(cs,"To apply the "),Fs=i(cs,"CODE",{});var zn=p(Fs);tl=n(zn,"jitter"),zn.forEach(s),ll=n(cs," over the entire dataset, use the \u{1F917} Datasets "),we=i(cs,"A",{href:!0,rel:!0});var On=p(we);nl=n(On,"set_transform"),On.forEach(s),rl=n(cs," function. The transform is applied on the fly which is faster and consumes less disk space:"),cs.forEach(s),da=c(e),d(ke.$$.fragment,e),ga=c(e),J=i(e,"H2",{class:!0});var Wa=p(J);K=i(Wa,"A",{id:!0,class:!0,href:!0});var Ln=p(K);Cs=i(Ln,"SPAN",{});var Nn=p(Cs);d(Ee.$$.fragment,Nn),Nn.forEach(s),Ln.forEach(s),ol=c(Wa),Ds=i(Wa,"SPAN",{});var Mn=p(Ds);il=n(Mn,"Train"),Mn.forEach(s),Wa.forEach(s),_a=c(e),Q=i(e,"P",{});var Ka=p(Q);pl=n(Ka,"Load SegFormer with "),Qe=i(Ka,"A",{href:!0});var Un=p(Qe);ml=n(Un,"AutoModelForSemanticSegmentation"),Un.forEach(s),cl=n(Ka,", and pass the model the mapping between label ids and label classes:"),Ka.forEach(s),ba=c(e),d(xe.$$.fragment,e),va=c(e),d(X.$$.fragment,e),ja=c(e),x=i(e,"P",{});var D=p(x);hl=n(D,"Define your training hyperparameters in "),Xe=i(D,"A",{href:!0});var Gn=p(Xe);fl=n(Gn,"TrainingArguments"),Gn.forEach(s),ul=n(D,". It is important not to remove unused columns because this will drop the "),Is=i(D,"CODE",{});var Bn=p(Is);dl=n(Bn,"image"),Bn.forEach(s),gl=n(D," column. Without the "),zs=i(D,"CODE",{});var Hn=p(zs);_l=n(Hn,"image"),Hn.forEach(s),bl=n(D," column, you can\u2019t create "),Os=i(D,"CODE",{});var Jn=p(Os);vl=n(Jn,"pixel_values"),Jn.forEach(s),jl=n(D,". 
Set "),Ls=i(D,"CODE",{});var Rn=p(Ls);$l=n(Rn,"remove_unused_columns=False"),Rn.forEach(s),yl=n(D," to prevent this behavior!"),D.forEach(s),$a=c(e),Z=i(e,"P",{});var Qa=p(Z);wl=n(Qa,"To save and push a model under your namespace to the Hub, set "),Ns=i(Qa,"CODE",{});var Yn=p(Ns);kl=n(Yn,"push_to_hub=True"),Yn.forEach(s),El=n(Qa,":"),Qa.forEach(s),ya=c(e),d(Pe.$$.fragment,e),wa=c(e),ee=i(e,"P",{});var Xa=p(ee);xl=n(Xa,"To evaluate model performance during training, you\u2019ll need to create a function to compute and report metrics. For semantic segmentation, you\u2019ll typically compute the "),Te=i(Xa,"A",{href:!0,rel:!0});var Vn=p(Te);Pl=n(Vn,"mean Intersection over Union"),Vn.forEach(s),Tl=n(Xa," (IoU). The mean IoU measures the overlapping area between the predicted and ground truth segmentation maps."),Xa.forEach(s),ka=c(e),Ze=i(e,"P",{});var Wn=p(Ze);ql=n(Wn,"Load the mean IoU from the \u{1F917} Evaluate library:"),Wn.forEach(s),Ea=c(e),d(qe.$$.fragment,e),xa=c(e),U=i(e,"P",{});var hs=p(U);Sl=n(hs,"Then create a function to "),Se=i(hs,"A",{href:!0,rel:!0});var Kn=p(Se);Al=n(Kn,"compute"),Kn.forEach(s),Fl=n(hs," the metrics. Your predictions need to be converted to logits first, and then reshaped to match the size of the labels before you can call "),Ae=i(hs,"A",{href:!0,rel:!0});var Qn=p(Ae);Cl=n(Qn,"compute"),Qn.forEach(s),Dl=n(hs,":"),hs.forEach(s),Pa=c(e),d(Fe.$$.fragment,e),Ta=c(e),se=i(e,"P",{});var Za=p(se);Il=n(Za,"Pass your model, training arguments, datasets, and metrics function to the "),es=i(Za,"A",{href:!0});var Xn=p(es);zl=n(Xn,"Trainer"),Xn.forEach(s),Ol=n(Za,":"),Za.forEach(s),qa=c(e),d(Ce.$$.fragment,e),Sa=c(e),ae=i(e,"P",{});var et=p(ae);Ll=n(et,"Lastly, call "),ss=i(et,"A",{href:!0});var Zn=p(ss);Nl=n(Zn,"train()"),Zn.forEach(s),Ml=n(et," to finetune your model:"),et.forEach(s),Aa=c(e),d(De.$$.fragment,e),Fa=c(e),R=i(e,"H2",{class:!0});var st=p(R);te=i(st,"A",{id:!0,class:!0,href:!0});var er=p(te);Ms=i(er,"SPAN",{});var sr=p(Ms);d(Ie.$$.fragment,sr),sr.forEach(s),er.forEach(s),Ul=c(st),Us=i(st,"SPAN",{});var ar=p(Us);Gl=n(ar,"Inference"),ar.forEach(s),st.forEach(s),Ca=c(e),as=i(e,"P",{});var tr=p(as);Bl=n(tr,"Great, now that you\u2019ve finetuned a model, you can use it for inference!"),tr.forEach(s),Da=c(e),ts=i(e,"P",{});var lr=p(ts);Hl=n(lr,"Load an image for inference:"),lr.forEach(s),Ia=c(e),d(ze.$$.fragment,e),za=c(e),Oe=i(e,"DIV",{class:!0});var nr=p(Oe);ls=i(nr,"IMG",{src:!0,alt:!0}),nr.forEach(s),Oa=c(e),le=i(e,"P",{});var at=p(le);Jl=n(at,"Process the image with a feature extractor and place the "),Gs=i(at,"CODE",{});var rr=p(Gs);Rl=n(rr,"pixel_values"),rr.forEach(s),Yl=n(at," on a GPU:"),at.forEach(s),La=c(e),d(Le.$$.fragment,e),Na=c(e),ne=i(e,"P",{});var tt=p(ne);Vl=n(tt,"Pass your input to the model and return the "),Bs=i(tt,"CODE",{});var or=p(Bs);Wl=n(or,"logits"),or.forEach(s),Kl=n(tt,":"),tt.forEach(s),Ma=c(e),d(Ne.$$.fragment,e),Ua=c(e),ns=i(e,"P",{});var ir=p(ns);Ql=n(ir,"Next, rescale the logits to the original image size:"),ir.forEach(s),Ga=c(e),d(Me.$$.fragment,e),Ba=c(e),re=i(e,"P",{});var lt=p(re);Xl=n(lt,"To visualize the results, load the "),Ue=i(lt,"A",{href:!0,rel:!0});var pr=p(Ue);Zl=n(pr,"dataset color palette"),pr.forEach(s),en=n(lt," that maps each class to their RGB values. 
Then you can combine and plot your image and the predicted segmentation map:"),lt.forEach(s),Ha=c(e),d(Ge.$$.fragment,e),Ja=c(e),Be=i(e,"DIV",{class:!0});var mr=p(Be);rs=i(mr,"IMG",{src:!0,alt:!0}),mr.forEach(s),this.h()},h(){h(f,"name","hf:doc:metadata"),h(f,"content",JSON.stringify(wr)),h(w,"id","semantic-segmentation"),h(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(w,"href","#semantic-segmentation"),h(j,"class","relative group"),h(pe,"href","https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer"),h(pe,"rel","nofollow"),h(me,"href","https://huggingface.co/datasets/scene_parse_150"),h(me,"rel","nofollow"),h(V,"id","load-sceneparse150-dataset"),h(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(V,"href","#load-sceneparse150-dataset"),h(B,"class","relative group"),h(W,"id","preprocess"),h(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(W,"href","#preprocess"),h(H,"class","relative group"),h(ve,"href","https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html"),h(ve,"rel","nofollow"),h(je,"href","https://pytorch.org/vision/stable/index.html"),h(je,"rel","nofollow"),h(we,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.set_transform"),h(we,"rel","nofollow"),h(K,"id","train"),h(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(K,"href","#train"),h(J,"class","relative group"),h(Qe,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSemanticSegmentation"),h(Xe,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),h(Te,"href","https://huggingface.co/spaces/evaluate-metric/mean_iou"),h(Te,"rel","nofollow"),h(Se,"href","https://huggingface.co/docs/evaluate/main/en/package_reference/main_classes#evaluate.EvaluationModule.compute"),h(Se,"rel","nofollow"),h(Ae,"href","https://huggingface.co/docs/evaluate/main/en/package_reference/main_classes#evaluate.EvaluationModule.compute"),h(Ae,"rel","nofollow"),h(es,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),h(ss,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train"),h(te,"id","inference"),h(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),h(te,"href","#inference"),h(R,"class","relative group"),cr(ls.src,sn="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png")||h(ls,"src",sn),h(ls,"alt","Image of bedroom"),h(Oe,"class","flex justify-center"),h(Ue,"href","https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51"),h(Ue,"rel","nofollow"),cr(rs.src,an="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png")||h(rs,"src",an),h(rs,"alt","Image of bedroom overlayed with segmentation map"),h(Be,"class","flex 
justify-center")},m(e,t){a(document.head,f),r(e,S,t),r(e,j,t),a(j,w),a(w,T),g($,T,null),a(j,P),a(j,A),a(A,G),r(e,q,t),g(I,e,t),r(e,oe,t),g(ie,e,t),r(e,Rs,t),r(e,Re,t),a(Re,nt),r(e,Ys,t),r(e,z,t),a(z,rt),a(z,pe),a(pe,ot),a(z,it),a(z,me),a(me,pt),a(z,mt),r(e,Vs,t),g(Y,e,t),r(e,Ws,t),r(e,Ye,t),a(Ye,ct),r(e,Ks,t),g(ce,e,t),r(e,Qs,t),r(e,B,t),a(B,V),a(V,fs),g(he,fs,null),a(B,ht),a(B,us),a(us,ft),r(e,Xs,t),r(e,Ve,t),a(Ve,ut),r(e,Zs,t),g(fe,e,t),r(e,ea,t),r(e,We,t),a(We,dt),r(e,sa,t),g(ue,e,t),r(e,aa,t),r(e,Ke,t),a(Ke,gt),r(e,ta,t),g(de,e,t),r(e,la,t),r(e,k,t),a(k,_t),a(k,ds),a(ds,bt),a(k,vt),a(k,gs),a(gs,jt),a(k,$t),a(k,_s),a(_s,yt),a(k,wt),a(k,bs),a(bs,kt),a(k,Et),a(k,vs),a(vs,xt),a(k,Pt),r(e,na,t),r(e,O,t),a(O,Tt),a(O,js),a(js,qt),a(O,St),a(O,$s),a($s,At),a(O,Ft),r(e,ra,t),g(ge,e,t),r(e,oa,t),r(e,H,t),a(H,W),a(W,ys),g(_e,ys,null),a(H,Ct),a(H,ws),a(ws,Dt),r(e,ia,t),r(e,L,t),a(L,It),a(L,ks),a(ks,zt),a(L,Ot),a(L,Es),a(Es,Lt),a(L,Nt),r(e,pa,t),g(be,e,t),r(e,ma,t),r(e,N,t),a(N,Mt),a(N,ve),a(ve,xs),a(xs,Ut),a(N,Gt),a(N,je),a(je,Bt),a(N,Ht),r(e,ca,t),g($e,e,t),r(e,ha,t),r(e,E,t),a(E,Jt),a(E,Ps),a(Ps,Rt),a(E,Yt),a(E,Ts),a(Ts,Vt),a(E,Wt),a(E,qs),a(qs,Kt),a(E,Qt),a(E,Ss),a(Ss,Xt),a(E,Zt),a(E,As),a(As,el),a(E,sl),r(e,fa,t),g(ye,e,t),r(e,ua,t),r(e,M,t),a(M,al),a(M,Fs),a(Fs,tl),a(M,ll),a(M,we),a(we,nl),a(M,rl),r(e,da,t),g(ke,e,t),r(e,ga,t),r(e,J,t),a(J,K),a(K,Cs),g(Ee,Cs,null),a(J,ol),a(J,Ds),a(Ds,il),r(e,_a,t),r(e,Q,t),a(Q,pl),a(Q,Qe),a(Qe,ml),a(Q,cl),r(e,ba,t),g(xe,e,t),r(e,va,t),g(X,e,t),r(e,ja,t),r(e,x,t),a(x,hl),a(x,Xe),a(Xe,fl),a(x,ul),a(x,Is),a(Is,dl),a(x,gl),a(x,zs),a(zs,_l),a(x,bl),a(x,Os),a(Os,vl),a(x,jl),a(x,Ls),a(Ls,$l),a(x,yl),r(e,$a,t),r(e,Z,t),a(Z,wl),a(Z,Ns),a(Ns,kl),a(Z,El),r(e,ya,t),g(Pe,e,t),r(e,wa,t),r(e,ee,t),a(ee,xl),a(ee,Te),a(Te,Pl),a(ee,Tl),r(e,ka,t),r(e,Ze,t),a(Ze,ql),r(e,Ea,t),g(qe,e,t),r(e,xa,t),r(e,U,t),a(U,Sl),a(U,Se),a(Se,Al),a(U,Fl),a(U,Ae),a(Ae,Cl),a(U,Dl),r(e,Pa,t),g(Fe,e,t),r(e,Ta,t),r(e,se,t),a(se,Il),a(se,es),a(es,zl),a(se,Ol),r(e,qa,t),g(Ce,e,t),r(e,Sa,t),r(e,ae,t),a(ae,Ll),a(ae,ss),a(ss,Nl),a(ae,Ml),r(e,Aa,t),g(De,e,t),r(e,Fa,t),r(e,R,t),a(R,te),a(te,Ms),g(Ie,Ms,null),a(R,Ul),a(R,Us),a(Us,Gl),r(e,Ca,t),r(e,as,t),a(as,Bl),r(e,Da,t),r(e,ts,t),a(ts,Hl),r(e,Ia,t),g(ze,e,t),r(e,za,t),r(e,Oe,t),a(Oe,ls),r(e,Oa,t),r(e,le,t),a(le,Jl),a(le,Gs),a(Gs,Rl),a(le,Yl),r(e,La,t),g(Le,e,t),r(e,Na,t),r(e,ne,t),a(ne,Vl),a(ne,Bs),a(Bs,Wl),a(ne,Kl),r(e,Ma,t),g(Ne,e,t),r(e,Ua,t),r(e,ns,t),a(ns,Ql),r(e,Ga,t),g(Me,e,t),r(e,Ba,t),r(e,re,t),a(re,Xl),a(re,Ue),a(Ue,Zl),a(re,en),r(e,Ha,t),g(Ge,e,t),r(e,Ja,t),r(e,Be,t),a(Be,rs),Ra=!0},p(e,[t]){const He={};t&2&&(He.$$scope={dirty:t,ctx:e}),Y.$set(He);const 
Hs={};t&2&&(Hs.$$scope={dirty:t,ctx:e}),X.$set(Hs)},i(e){Ra||(_($.$$.fragment,e),_(I.$$.fragment,e),_(ie.$$.fragment,e),_(Y.$$.fragment,e),_(ce.$$.fragment,e),_(he.$$.fragment,e),_(fe.$$.fragment,e),_(ue.$$.fragment,e),_(de.$$.fragment,e),_(ge.$$.fragment,e),_(_e.$$.fragment,e),_(be.$$.fragment,e),_($e.$$.fragment,e),_(ye.$$.fragment,e),_(ke.$$.fragment,e),_(Ee.$$.fragment,e),_(xe.$$.fragment,e),_(X.$$.fragment,e),_(Pe.$$.fragment,e),_(qe.$$.fragment,e),_(Fe.$$.fragment,e),_(Ce.$$.fragment,e),_(De.$$.fragment,e),_(Ie.$$.fragment,e),_(ze.$$.fragment,e),_(Le.$$.fragment,e),_(Ne.$$.fragment,e),_(Me.$$.fragment,e),_(Ge.$$.fragment,e),Ra=!0)},o(e){b($.$$.fragment,e),b(I.$$.fragment,e),b(ie.$$.fragment,e),b(Y.$$.fragment,e),b(ce.$$.fragment,e),b(he.$$.fragment,e),b(fe.$$.fragment,e),b(ue.$$.fragment,e),b(de.$$.fragment,e),b(ge.$$.fragment,e),b(_e.$$.fragment,e),b(be.$$.fragment,e),b($e.$$.fragment,e),b(ye.$$.fragment,e),b(ke.$$.fragment,e),b(Ee.$$.fragment,e),b(xe.$$.fragment,e),b(X.$$.fragment,e),b(Pe.$$.fragment,e),b(qe.$$.fragment,e),b(Fe.$$.fragment,e),b(Ce.$$.fragment,e),b(De.$$.fragment,e),b(Ie.$$.fragment,e),b(ze.$$.fragment,e),b(Le.$$.fragment,e),b(Ne.$$.fragment,e),b(Me.$$.fragment,e),b(Ge.$$.fragment,e),Ra=!1},d(e){s(f),e&&s(S),e&&s(j),v($),e&&s(q),v(I,e),e&&s(oe),v(ie,e),e&&s(Rs),e&&s(Re),e&&s(Ys),e&&s(z),e&&s(Vs),v(Y,e),e&&s(Ws),e&&s(Ye),e&&s(Ks),v(ce,e),e&&s(Qs),e&&s(B),v(he),e&&s(Xs),e&&s(Ve),e&&s(Zs),v(fe,e),e&&s(ea),e&&s(We),e&&s(sa),v(ue,e),e&&s(aa),e&&s(Ke),e&&s(ta),v(de,e),e&&s(la),e&&s(k),e&&s(na),e&&s(O),e&&s(ra),v(ge,e),e&&s(oa),e&&s(H),v(_e),e&&s(ia),e&&s(L),e&&s(pa),v(be,e),e&&s(ma),e&&s(N),e&&s(ca),v($e,e),e&&s(ha),e&&s(E),e&&s(fa),v(ye,e),e&&s(ua),e&&s(M),e&&s(da),v(ke,e),e&&s(ga),e&&s(J),v(Ee),e&&s(_a),e&&s(Q),e&&s(ba),v(xe,e),e&&s(va),v(X,e),e&&s(ja),e&&s(x),e&&s($a),e&&s(Z),e&&s(ya),v(Pe,e),e&&s(wa),e&&s(ee),e&&s(ka),e&&s(Ze),e&&s(Ea),v(qe,e),e&&s(xa),e&&s(U),e&&s(Pa),v(Fe,e),e&&s(Ta),e&&s(se),e&&s(qa),v(Ce,e),e&&s(Sa),e&&s(ae),e&&s(Aa),v(De,e),e&&s(Fa),e&&s(R),v(Ie),e&&s(Ca),e&&s(as),e&&s(Da),e&&s(ts),e&&s(Ia),v(ze,e),e&&s(za),e&&s(Oe),e&&s(Oa),e&&s(le),e&&s(La),v(Le,e),e&&s(Na),e&&s(ne),e&&s(Ma),v(Ne,e),e&&s(Ua),e&&s(ns),e&&s(Ga),v(Me,e),e&&s(Ba),e&&s(re),e&&s(Ha),v(Ge,e),e&&s(Ja),e&&s(Be)}}}const wr={local:"semantic-segmentation",sections:[{local:"load-sceneparse150-dataset",title:"Load SceneParse150 dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"},{local:"inference",title:"Inference"}],title:"Semantic segmentation"};function kr(Je){return _r(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ar extends fr{constructor(f){super();ur(this,f,kr,yr,dr,{})}}export{Ar as default,wr as metadata};
27
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/question_answering.mdx-hf-doc-builder.js
import{S as Ta,i as Da,s as za,e as i,k as _,w as q,t as r,M as Ca,c as p,d as s,m as g,a as f,x as y,h as o,b as w,G as t,g as c,y as E,q as A,o as T,B as D,v as Pa,L as Aa}from"../../chunks/vendor-hf-doc-builder.js";import{T as Gt}from"../../chunks/Tip-hf-doc-builder.js";import{Y as ya}from"../../chunks/Youtube-hf-doc-builder.js";import{I as Yt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ne}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as Ea,M as Jt}from"../../chunks/Markdown-hf-doc-builder.js";function Fa(F){let a,u,n,d,j;return{c(){a=i("p"),u=r("See the question answering "),n=i("a"),d=r("task page"),j=r(" for more information about other forms of question answering and their associated models, datasets, and metrics."),this.h()},l($){a=p($,"P",{});var v=f(a);u=o(v,"See the question answering "),n=p(v,"A",{href:!0,rel:!0});var C=f(n);d=o(C,"task page"),C.forEach(s),j=o(v," for more information about other forms of question answering and their associated models, datasets, and metrics."),v.forEach(s),this.h()},h(){w(n,"href","https://huggingface.co/tasks/question-answering"),w(n,"rel","nofollow")},m($,v){c($,a,v),t(a,u),t(a,n),t(n,d),t(a,j)},d($){$&&s(a)}}}function Ma(F){let a,u;return a=new ne({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()`}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p:Aa,i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function Sa(F){let a,u;return a=new Jt({props:{$$slots:{default:[Ma]},$$scope:{ctx:F}}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p(n,d){const j={};d&2&&(j.$$scope={dirty:d,ctx:n}),a.$set(j)},i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function La(F){let a,u;return a=new ne({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator(return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p:Aa,i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function Qa(F){let a,u;return a=new Jt({props:{$$slots:{default:[La]},$$scope:{ctx:F}}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p(n,d){const j={};d&2&&(j.$$scope={dirty:d,ctx:n}),a.$set(j)},i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function Oa(F){let a,u,n,d,j,$,v,C;return{c(){a=i("p"),u=r("If you aren\u2019t familiar with fine-tuning a model with the "),n=i("a"),d=r("Trainer"),j=r(", take a look at the basic tutorial "),$=i("a"),v=r("here"),C=r("!"),this.h()},l(z){a=p(z,"P",{});var k=f(a);u=o(k,"If you aren\u2019t familiar with fine-tuning a model with the "),n=p(k,"A",{href:!0});var P=f(n);d=o(P,"Trainer"),P.forEach(s),j=o(k,", take a look at the basic tutorial "),$=p(k,"A",{href:!0});var 
M=f($);v=o(M,"here"),M.forEach(s),C=o(k,"!"),k.forEach(s),this.h()},h(){w(n,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),w($,"href","../training#finetune-with-trainer")},m(z,k){c(z,a,k),t(a,u),t(a,n),t(n,d),t(a,j),t(a,$),t($,v),t(a,C)},d(z){z&&s(a)}}}function Ia(F){let a,u,n,d,j,$,v,C,z,k,P,M,J,L,K,H,O,le,W,re,N,ie,Q,pe,B,fe,I,Z,R,oe,ce,V,Y,S;return v=new ne({props:{code:`from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForQuestionAnswering, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),z=new Gt({props:{$$slots:{default:[Oa]},$$scope:{ctx:F}}}),Y=new ne({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_squad["train"], eval_dataset=tokenized_squad["validation"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){a=i("p"),u=r("Load DistilBERT with "),n=i("a"),d=r("AutoModelForQuestionAnswering"),j=r(":"),$=_(),q(v.$$.fragment),C=_(),q(z.$$.fragment),k=_(),P=i("p"),M=r("At this point, only three steps remain:"),J=_(),L=i("ol"),K=i("li"),H=r("Define your training hyperparameters in "),O=i("a"),le=r("TrainingArguments"),W=r("."),re=_(),N=i("li"),ie=r("Pass the training arguments to "),Q=i("a"),pe=r("Trainer"),B=r(" along with the model, dataset, tokenizer, and data collator."),fe=_(),I=i("li"),Z=r("Call "),R=i("a"),oe=r("train()"),ce=r(" to fine-tune your model."),V=_(),q(Y.$$.fragment),this.h()},l(m){a=p(m,"P",{});var b=f(a);u=o(b,"Load DistilBERT with "),n=p(b,"A",{href:!0});var G=f(n);d=o(G,"AutoModelForQuestionAnswering"),G.forEach(s),j=o(b,":"),b.forEach(s),$=g(m),y(v.$$.fragment,m),C=g(m),y(z.$$.fragment,m),k=g(m),P=p(m,"P",{});var te=f(P);M=o(te,"At this point, only three steps remain:"),te.forEach(s),J=g(m),L=p(m,"OL",{});var U=f(L);K=p(U,"LI",{});var he=f(K);H=o(he,"Define your training hyperparameters in "),O=p(he,"A",{href:!0});var _e=f(O);le=o(_e,"TrainingArguments"),_e.forEach(s),W=o(he,"."),he.forEach(s),re=g(U),N=p(U,"LI",{});var X=f(N);ie=o(X,"Pass the training arguments to "),Q=p(X,"A",{href:!0});var se=f(Q);pe=o(se,"Trainer"),se.forEach(s),B=o(X," along with the model, dataset, tokenizer, and data collator."),X.forEach(s),fe=g(U),I=p(U,"LI",{});var ae=f(I);Z=o(ae,"Call "),R=p(ae,"A",{href:!0});var l=f(R);oe=o(l,"train()"),l.forEach(s),ce=o(ae," to fine-tune your model."),ae.forEach(s),U.forEach(s),V=g(m),y(Y.$$.fragment,m),this.h()},h(){w(n,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForQuestionAnswering"),w(O,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),w(Q,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),w(R,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(m,b){c(m,a,b),t(a,u),t(a,n),t(n,d),t(a,j),c(m,$,b),E(v,m,b),c(m,C,b),E(z,m,b),c(m,k,b),c(m,P,b),t(P,M),c(m,J,b),c(m,L,b),t(L,K),t(K,H),t(K,O),t(O,le),t(K,W),t(L,re),t(L,N),t(N,ie),t(N,Q),t(Q,pe),t(N,B),t(L,fe),t(L,I),t(I,Z),t(I,R),t(R,oe),t(I,ce),c(m,V,b),E(Y,m,b),S=!0},p(m,b){const G={};b&2&&(G.$$scope={dirty:b,ctx:m}),z.$set(G)},i(m){S||(A(v.$$.fragment,m),A(z.$$.fragment,m),A(Y.$$.fragment,m),S=!0)},o(m){T(v.$$.fragment,m),T(z.$$.fragment,m),T(Y.$$.fragment,m),S=!1},d(m){m&&s(a),m&&s($),D(v,m),m&&s(C),D(z,m),m&&s(k),m&&s(P),m&&s(J),m&&s(L),m&&s(V),D(Y,m)}}}function Ba(F){let a,u;return a=new Jt({props:{$$slots:{default:[Ia]},$$scope:{ctx:F}}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p(n,d){const j={};d&2&&(j.$$scope={dirty:d,ctx:n}),a.$set(j)},i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function Na(F){let a,u,n,d,j;return{c(){a=i("p"),u=r("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),n=i("a"),d=r("here"),j=r("!"),this.h()},l($){a=p($,"P",{});var v=f(a);u=o(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),n=p(v,"A",{href:!0});var C=f(n);d=o(C,"here"),C.forEach(s),j=o(v,"!"),v.forEach(s),this.h()},h(){w(n,"href","training#finetune-with-keras")},m($,v){c($,a,v),t(a,u),t(a,n),t(n,d),t(a,j)},d($){$&&s(a)}}}function Ra(F){let 
a,u,n,d,j,$,v,C,z,k,P,M,J,L,K,H,O,le,W,re,N,ie,Q,pe,B,fe,I,Z,R,oe,ce,V,Y,S,m,b,G,te,U,he,_e,X,se,ae;return k=new ne({props:{code:`tf_train_set = model.prepare_tf_dataset( tokenized_squad["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = model.prepare_tf_dataset( tokenized_squad["validation"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),M=new Gt({props:{$$slots:{default:[Na]},$$scope:{ctx:F}}}),O=new ne({props:{code:`from transformers import create_optimizer batch_size = 16 num_epochs = 2 total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs optimizer, schedule = create_optimizer( init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... </span> num_train_steps=total_train_steps, <span class="hljs-meta">... 
</span>)`}}),B=new ne({props:{code:`from transformers import TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),S=new ne({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),se=new ne({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){a=i("p"),u=r("To fine-tune a model in TensorFlow, start by converting your datasets to the "),n=i("code"),d=r("tf.data.Dataset"),j=r(" format with "),$=i("a"),v=r("prepare_tf_dataset()"),C=r("."),z=_(),q(k.$$.fragment),P=_(),q(M.$$.fragment),J=_(),L=i("p"),K=r("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),H=_(),q(O.$$.fragment),le=_(),W=i("p"),re=r("Load DistilBERT with "),N=i("a"),ie=r("TFAutoModelForQuestionAnswering"),Q=r(":"),pe=_(),q(B.$$.fragment),fe=_(),I=i("p"),Z=r("Configure the model for training with "),R=i("a"),oe=i("code"),ce=r("compile"),V=r(":"),Y=_(),q(S.$$.fragment),m=_(),b=i("p"),G=r("Call "),te=i("a"),U=i("code"),he=r("fit"),_e=r(" to fine-tune the model:"),X=_(),q(se.$$.fragment),this.h()},l(l){a=p(l,"P",{});var x=f(a);u=o(x,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),n=p(x,"CODE",{});var me=f(n);d=o(me,"tf.data.Dataset"),me.forEach(s),j=o(x," format with "),$=p(x,"A",{href:!0});var Ge=f($);v=o(Ge,"prepare_tf_dataset()"),Ge.forEach(s),C=o(x,"."),x.forEach(s),z=g(l),y(k.$$.fragment,l),P=g(l),y(M.$$.fragment,l),J=g(l),L=p(l,"P",{});var Le=f(L);K=o(Le,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Le.forEach(s),H=g(l),y(O.$$.fragment,l),le=g(l),W=p(l,"P",{});var ue=f(W);re=o(ue,"Load DistilBERT with "),N=p(ue,"A",{href:!0});var Qe=f(N);ie=o(Qe,"TFAutoModelForQuestionAnswering"),Qe.forEach(s),Q=o(ue,":"),ue.forEach(s),pe=g(l),y(B.$$.fragment,l),fe=g(l),I=p(l,"P",{});var ee=f(I);Z=o(ee,"Configure the model for training with "),R=p(ee,"A",{href:!0,rel:!0});var Je=f(R);oe=p(Je,"CODE",{});var qe=f(oe);ce=o(qe,"compile"),qe.forEach(s),Je.forEach(s),V=o(ee,":"),ee.forEach(s),Y=g(l),y(S.$$.fragment,l),m=g(l),b=p(l,"P",{});var ye=f(b);G=o(ye,"Call "),te=p(ye,"A",{href:!0,rel:!0});var Ke=f(te);U=p(Ke,"CODE",{});var Ee=f(U);he=o(Ee,"fit"),Ee.forEach(s),Ke.forEach(s),_e=o(ye," to fine-tune the 
model:"),ye.forEach(s),X=g(l),y(se.$$.fragment,l),this.h()},h(){w($,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),w(N,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForQuestionAnswering"),w(R,"href","https://keras.io/api/models/model_training_apis/#compile-method"),w(R,"rel","nofollow"),w(te,"href","https://keras.io/api/models/model_training_apis/#fit-method"),w(te,"rel","nofollow")},m(l,x){c(l,a,x),t(a,u),t(a,n),t(n,d),t(a,j),t(a,$),t($,v),t(a,C),c(l,z,x),E(k,l,x),c(l,P,x),E(M,l,x),c(l,J,x),c(l,L,x),t(L,K),c(l,H,x),E(O,l,x),c(l,le,x),c(l,W,x),t(W,re),t(W,N),t(N,ie),t(W,Q),c(l,pe,x),E(B,l,x),c(l,fe,x),c(l,I,x),t(I,Z),t(I,R),t(R,oe),t(oe,ce),t(I,V),c(l,Y,x),E(S,l,x),c(l,m,x),c(l,b,x),t(b,G),t(b,te),t(te,U),t(U,he),t(b,_e),c(l,X,x),E(se,l,x),ae=!0},p(l,x){const me={};x&2&&(me.$$scope={dirty:x,ctx:l}),M.$set(me)},i(l){ae||(A(k.$$.fragment,l),A(M.$$.fragment,l),A(O.$$.fragment,l),A(B.$$.fragment,l),A(S.$$.fragment,l),A(se.$$.fragment,l),ae=!0)},o(l){T(k.$$.fragment,l),T(M.$$.fragment,l),T(O.$$.fragment,l),T(B.$$.fragment,l),T(S.$$.fragment,l),T(se.$$.fragment,l),ae=!1},d(l){l&&s(a),l&&s(z),D(k,l),l&&s(P),D(M,l),l&&s(J),l&&s(L),l&&s(H),D(O,l),l&&s(le),l&&s(W),l&&s(pe),D(B,l),l&&s(fe),l&&s(I),l&&s(Y),D(S,l),l&&s(m),l&&s(b),l&&s(X),D(se,l)}}}function Ua(F){let a,u;return a=new Jt({props:{$$slots:{default:[Ra]},$$scope:{ctx:F}}}),{c(){q(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,d){E(a,n,d),u=!0},p(n,d){const j={};d&2&&(j.$$scope={dirty:d,ctx:n}),a.$set(j)},i(n){u||(A(a.$$.fragment,n),u=!0)},o(n){T(a.$$.fragment,n),u=!1},d(n){D(a,n)}}}function Ha(F){let a,u,n,d,j,$,v,C;return{c(){a=i("p"),u=r(`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),n=i("a"),d=r("PyTorch notebook"),j=r(` or `),$=i("a"),v=r("TensorFlow notebook"),C=r("."),this.h()},l(z){a=p(z,"P",{});var k=f(a);u=o(k,`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),n=p(k,"A",{href:!0,rel:!0});var P=f(n);d=o(P,"PyTorch notebook"),P.forEach(s),j=o(k,` or `),$=p(k,"A",{href:!0,rel:!0});var M=f($);v=o(M,"TensorFlow notebook"),M.forEach(s),C=o(k,"."),k.forEach(s),this.h()},h(){w(n,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb"),w(n,"rel","nofollow"),w($,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb"),w($,"rel","nofollow")},m(z,k){c(z,a,k),t(a,u),t(a,n),t(n,d),t(a,j),t(a,$),t($,v),t(a,C)},d(z){z&&s(a)}}}function Va(F){let a,u,n,d,j,$,v,C,z,k,P,M,J,L,K,H,O,le,W,re,N,ie,Q,pe,B,fe,I,Z,R,oe,ce,V,Y,S,m,b,G,te,U,he,_e,X,se,ae,l,x,me,Ge,Le,ue,Qe,ee,Je,qe,ye,Ke,Ee,Kt,Wt,yt,xe,Ae,rt,Oe,Xt,ot,Zt,Et,Ie,At,we,es,lt,ts,ss,it,as,ns,Tt,Be,Dt,We,rs,zt,je,ge,os,pt,ls,is,ft,ps,fs,ct,cs,hs,ms,ke,us,ht,ds,_s,mt,gs,$s,ws,$e,js,Ne,ut,vs,xs,dt,ks,bs,_t,qs,ys,Ct,Te,Es,gt,As,Ts,Pt,Re,Ft,de,Ds,Ue,zs,Cs,$t,Ps,Fs,wt,Ms,Ss,Mt,He,St,ve,Ls,Xe,Qs,Os,jt,Is,Bs,Lt,De,Qt,be,ze,vt,Ve,Ns,xt,Rs,Ot,Ce,It,Pe,Bt;return $=new Yt({}),P=new ya({props:{id:"ajPx5LwJD-I"}}),V=new Gt({props:{$$slots:{default:[Fa]},$$scope:{ctx:F}}}),G=new Yt({}),l=new ne({props:{code:`from datasets import load_dataset squad = load_dataset("squad")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; 
</span>squad = load_dataset(<span class="hljs-string">&quot;squad&quot;</span>)`}}),ue=new ne({props:{code:'squad["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>squad[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;answer_start&#x27;</span>: [<span class="hljs-number">515</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&#x27;Saint Bernadette Soubirous&#x27;</span>]}, <span class="hljs-string">&#x27;context&#x27;</span>: <span class="hljs-string">&#x27;Architecturally, the school has a Catholic character. Atop the Main Building\\&#x27;s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend &quot;Venite Ad Me Omnes&quot;. Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;5733be284776f41900661182&#x27;</span>, <span class="hljs-string">&#x27;question&#x27;</span>: <span class="hljs-string">&#x27;To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;University_of_Notre_Dame&#x27;</span> }`}}),Oe=new Yt({}),Ie=new ya({props:{id:"qgaM0weJHpA"}}),Be=new ne({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Re=new ne({props:{code:`def preprocess_function(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=384, truncation="only_second", return_offsets_mapping=True, padding="max_length", ) offset_mapping = inputs.pop("offset_mapping") answers = examples["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(offset_mapping): answer = answers[i] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Find the start and end of the context idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # If the answer is not fully inside the context, label it (0, 0) if offset[context_start][0] > end_char or offset[context_end][1] < start_char: start_positions.append(0) end_positions.append(0) else: # Otherwise it's the start and end token positions idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) 
inputs["start_positions"] = start_positions inputs["end_positions"] = end_positions return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> questions = [q.strip() <span class="hljs-keyword">for</span> q <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;question&quot;</span>]] <span class="hljs-meta">... </span> inputs = tokenizer( <span class="hljs-meta">... </span> questions, <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;context&quot;</span>], <span class="hljs-meta">... </span> max_length=<span class="hljs-number">384</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-string">&quot;only_second&quot;</span>, <span class="hljs-meta">... </span> return_offsets_mapping=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-string">&quot;max_length&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> offset_mapping = inputs.pop(<span class="hljs-string">&quot;offset_mapping&quot;</span>) <span class="hljs-meta">... </span> answers = examples[<span class="hljs-string">&quot;answers&quot;</span>] <span class="hljs-meta">... </span> start_positions = [] <span class="hljs-meta">... </span> end_positions = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, offset <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(offset_mapping): <span class="hljs-meta">... </span> answer = answers[i] <span class="hljs-meta">... </span> start_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> end_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] + <span class="hljs-built_in">len</span>(answer[<span class="hljs-string">&quot;text&quot;</span>][<span class="hljs-number">0</span>]) <span class="hljs-meta">... </span> sequence_ids = inputs.sequence_ids(i) <span class="hljs-meta">... </span> <span class="hljs-comment"># Find the start and end of the context</span> <span class="hljs-meta">... </span> idx = <span class="hljs-number">0</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] != <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_start = idx <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] == <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_end = idx - <span class="hljs-number">1</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># If the answer is not fully inside the context, label it (0, 0)</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> offset[context_start][<span class="hljs-number">0</span>] &gt; end_char <span class="hljs-keyword">or</span> offset[context_end][<span class="hljs-number">1</span>] &lt; start_char: <span class="hljs-meta">... </span> start_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... 
</span> end_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># Otherwise it&#x27;s the start and end token positions</span> <span class="hljs-meta">... </span> idx = context_start <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> idx &lt;= context_end <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">0</span>] &lt;= start_char: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> start_positions.append(idx - <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> idx = context_end <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> idx &gt;= context_start <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">1</span>] &gt;= end_char: <span class="hljs-meta">... </span> idx -= <span class="hljs-number">1</span> <span class="hljs-meta">... </span> end_positions.append(idx + <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;start_positions&quot;</span>] = start_positions <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;end_positions&quot;</span>] = end_positions <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs`}}),He=new ne({props:{code:'tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_squad = squad.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>, remove_columns=squad[<span class="hljs-string">&quot;train&quot;</span>].column_names)'}}),De=new Ea({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Qa],pytorch:[Sa]},$$scope:{ctx:F}}}),Ve=new Yt({}),Ce=new Ea({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Ua],pytorch:[Ba]},$$scope:{ctx:F}}}),Pe=new Gt({props:{$$slots:{default:[Ha]},$$scope:{ctx:F}}}),{c(){a=i("meta"),u=_(),n=i("h1"),d=i("a"),j=i("span"),q($.$$.fragment),v=_(),C=i("span"),z=r("Question answering"),k=_(),q(P.$$.fragment),M=_(),J=i("p"),L=r("Question answering tasks return an answer given a question. 
There are two common forms of question answering:"),K=_(),H=i("ul"),O=i("li"),le=r("Extractive: extract the answer from the given context."),W=_(),re=i("li"),N=r("Abstractive: generate an answer from the context that correctly answers the question."),ie=_(),Q=i("p"),pe=r("This guide will show you how to fine-tune "),B=i("a"),fe=r("DistilBERT"),I=r(" on the "),Z=i("a"),R=r("SQuAD"),oe=r(" dataset for extractive question answering."),ce=_(),q(V.$$.fragment),Y=_(),S=i("h2"),m=i("a"),b=i("span"),q(G.$$.fragment),te=_(),U=i("span"),he=r("Load SQuAD dataset"),_e=_(),X=i("p"),se=r("Load the SQuAD dataset from the \u{1F917} Datasets library:"),ae=_(),q(l.$$.fragment),x=_(),me=i("p"),Ge=r("Then take a look at an example:"),Le=_(),q(ue.$$.fragment),Qe=_(),ee=i("p"),Je=r("The "),qe=i("code"),ye=r("answers"),Ke=r(" field is a dictionary containing the starting position of the answer and the "),Ee=i("code"),Kt=r("text"),Wt=r(" of the answer."),yt=_(),xe=i("h2"),Ae=i("a"),rt=i("span"),q(Oe.$$.fragment),Xt=_(),ot=i("span"),Zt=r("Preprocess"),Et=_(),q(Ie.$$.fragment),At=_(),we=i("p"),es=r("Load the DistilBERT tokenizer to process the "),lt=i("code"),ts=r("question"),ss=r(" and "),it=i("code"),as=r("context"),ns=r(" fields:"),Tt=_(),q(Be.$$.fragment),Dt=_(),We=i("p"),rs=r("There are a few preprocessing steps particular to question answering that you should be aware of:"),zt=_(),je=i("ol"),ge=i("li"),os=r("Some examples in a dataset may have a very long "),pt=i("code"),ls=r("context"),is=r(" that exceeds the maximum input length of the model. Truncate only the "),ft=i("code"),ps=r("context"),fs=r(" by setting "),ct=i("code"),cs=r('truncation="only_second"'),hs=r("."),ms=_(),ke=i("li"),us=r("Next, map the start and end positions of the answer to the original "),ht=i("code"),ds=r("context"),_s=r(` by setting `),mt=i("code"),gs=r("return_offset_mapping=True"),$s=r("."),ws=_(),$e=i("li"),js=r("With the mapping in hand, you can find the start and end tokens of the answer. Use the "),Ne=i("a"),ut=i("code"),vs=r("sequence_ids"),xs=r(` method to find which part of the offset corresponds to the `),dt=i("code"),ks=r("question"),bs=r(" and which corresponds to the "),_t=i("code"),qs=r("context"),ys=r("."),Ct=_(),Te=i("p"),Es=r("Here is how you can create a function to truncate and map the start and end tokens of the answer to the "),gt=i("code"),As=r("context"),Ts=r(":"),Pt=_(),q(Re.$$.fragment),Ft=_(),de=i("p"),Ds=r("Use \u{1F917} Datasets "),Ue=i("a"),zs=r("map"),Cs=r(" function to apply the preprocessing function over the entire dataset. You can speed up the "),$t=i("code"),Ps=r("map"),Fs=r(" function by setting "),wt=i("code"),Ms=r("batched=True"),Ss=r(" to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),Mt=_(),q(He.$$.fragment),St=_(),ve=i("p"),Ls=r("Use "),Xe=i("a"),Qs=r("DefaultDataCollator"),Os=r(" to create a batch of examples. 
Unlike other data collators in \u{1F917} Transformers, the "),jt=i("code"),Is=r("DefaultDataCollator"),Bs=r(" does not apply additional preprocessing such as padding."),Lt=_(),q(De.$$.fragment),Qt=_(),be=i("h2"),ze=i("a"),vt=i("span"),q(Ve.$$.fragment),Ns=_(),xt=i("span"),Rs=r("Train"),Ot=_(),q(Ce.$$.fragment),It=_(),q(Pe.$$.fragment),this.h()},l(e){const h=Ca('[data-svelte="svelte-1phssyn"]',document.head);a=p(h,"META",{name:!0,content:!0}),h.forEach(s),u=g(e),n=p(e,"H1",{class:!0});var Ye=f(n);d=p(Ye,"A",{id:!0,class:!0,href:!0});var kt=f(d);j=p(kt,"SPAN",{});var bt=f(j);y($.$$.fragment,bt),bt.forEach(s),kt.forEach(s),v=g(Ye),C=p(Ye,"SPAN",{});var qt=f(C);z=o(qt,"Question answering"),qt.forEach(s),Ye.forEach(s),k=g(e),y(P.$$.fragment,e),M=g(e),J=p(e,"P",{});var Us=f(J);L=o(Us,"Question answering tasks return an answer given a question. There are two common forms of question answering:"),Us.forEach(s),K=g(e),H=p(e,"UL",{});var Nt=f(H);O=p(Nt,"LI",{});var Hs=f(O);le=o(Hs,"Extractive: extract the answer from the given context."),Hs.forEach(s),W=g(Nt),re=p(Nt,"LI",{});var Vs=f(re);N=o(Vs,"Abstractive: generate an answer from the context that correctly answers the question."),Vs.forEach(s),Nt.forEach(s),ie=g(e),Q=p(e,"P",{});var Ze=f(Q);pe=o(Ze,"This guide will show you how to fine-tune "),B=p(Ze,"A",{href:!0,rel:!0});var Ys=f(B);fe=o(Ys,"DistilBERT"),Ys.forEach(s),I=o(Ze," on the "),Z=p(Ze,"A",{href:!0,rel:!0});var Gs=f(Z);R=o(Gs,"SQuAD"),Gs.forEach(s),oe=o(Ze," dataset for extractive question answering."),Ze.forEach(s),ce=g(e),y(V.$$.fragment,e),Y=g(e),S=p(e,"H2",{class:!0});var Rt=f(S);m=p(Rt,"A",{id:!0,class:!0,href:!0});var Js=f(m);b=p(Js,"SPAN",{});var Ks=f(b);y(G.$$.fragment,Ks),Ks.forEach(s),Js.forEach(s),te=g(Rt),U=p(Rt,"SPAN",{});var Ws=f(U);he=o(Ws,"Load SQuAD dataset"),Ws.forEach(s),Rt.forEach(s),_e=g(e),X=p(e,"P",{});var Xs=f(X);se=o(Xs,"Load the SQuAD dataset from the \u{1F917} Datasets library:"),Xs.forEach(s),ae=g(e),y(l.$$.fragment,e),x=g(e),me=p(e,"P",{});var Zs=f(me);Ge=o(Zs,"Then take a look at an example:"),Zs.forEach(s),Le=g(e),y(ue.$$.fragment,e),Qe=g(e),ee=p(e,"P",{});var et=f(ee);Je=o(et,"The "),qe=p(et,"CODE",{});var ea=f(qe);ye=o(ea,"answers"),ea.forEach(s),Ke=o(et," field is a dictionary containing the starting position of the answer and the "),Ee=p(et,"CODE",{});var ta=f(Ee);Kt=o(ta,"text"),ta.forEach(s),Wt=o(et," of the answer."),et.forEach(s),yt=g(e),xe=p(e,"H2",{class:!0});var Ut=f(xe);Ae=p(Ut,"A",{id:!0,class:!0,href:!0});var sa=f(Ae);rt=p(sa,"SPAN",{});var aa=f(rt);y(Oe.$$.fragment,aa),aa.forEach(s),sa.forEach(s),Xt=g(Ut),ot=p(Ut,"SPAN",{});var na=f(ot);Zt=o(na,"Preprocess"),na.forEach(s),Ut.forEach(s),Et=g(e),y(Ie.$$.fragment,e),At=g(e),we=p(e,"P",{});var tt=f(we);es=o(tt,"Load the DistilBERT tokenizer to process the "),lt=p(tt,"CODE",{});var ra=f(lt);ts=o(ra,"question"),ra.forEach(s),ss=o(tt," and "),it=p(tt,"CODE",{});var oa=f(it);as=o(oa,"context"),oa.forEach(s),ns=o(tt," fields:"),tt.forEach(s),Tt=g(e),y(Be.$$.fragment,e),Dt=g(e),We=p(e,"P",{});var la=f(We);rs=o(la,"There are a few preprocessing steps particular to question answering that you should be aware of:"),la.forEach(s),zt=g(e),je=p(e,"OL",{});var st=f(je);ge=p(st,"LI",{});var Fe=f(ge);os=o(Fe,"Some examples in a dataset may have a very long "),pt=p(Fe,"CODE",{});var ia=f(pt);ls=o(ia,"context"),ia.forEach(s),is=o(Fe," that exceeds the maximum input length of the model. 
Truncate only the "),ft=p(Fe,"CODE",{});var pa=f(ft);ps=o(pa,"context"),pa.forEach(s),fs=o(Fe," by setting "),ct=p(Fe,"CODE",{});var fa=f(ct);cs=o(fa,'truncation="only_second"'),fa.forEach(s),hs=o(Fe,"."),Fe.forEach(s),ms=g(st),ke=p(st,"LI",{});var at=f(ke);us=o(at,"Next, map the start and end positions of the answer to the original "),ht=p(at,"CODE",{});var ca=f(ht);ds=o(ca,"context"),ca.forEach(s),_s=o(at,` by setting `),mt=p(at,"CODE",{});var ha=f(mt);gs=o(ha,"return_offset_mapping=True"),ha.forEach(s),$s=o(at,"."),at.forEach(s),ws=g(st),$e=p(st,"LI",{});var Me=f($e);js=o(Me,"With the mapping in hand, you can find the start and end tokens of the answer. Use the "),Ne=p(Me,"A",{href:!0,rel:!0});var ma=f(Ne);ut=p(ma,"CODE",{});var ua=f(ut);vs=o(ua,"sequence_ids"),ua.forEach(s),ma.forEach(s),xs=o(Me,` method to find which part of the offset corresponds to the `),dt=p(Me,"CODE",{});var da=f(dt);ks=o(da,"question"),da.forEach(s),bs=o(Me," and which corresponds to the "),_t=p(Me,"CODE",{});var _a=f(_t);qs=o(_a,"context"),_a.forEach(s),ys=o(Me,"."),Me.forEach(s),st.forEach(s),Ct=g(e),Te=p(e,"P",{});var Ht=f(Te);Es=o(Ht,"Here is how you can create a function to truncate and map the start and end tokens of the answer to the "),gt=p(Ht,"CODE",{});var ga=f(gt);As=o(ga,"context"),ga.forEach(s),Ts=o(Ht,":"),Ht.forEach(s),Pt=g(e),y(Re.$$.fragment,e),Ft=g(e),de=p(e,"P",{});var Se=f(de);Ds=o(Se,"Use \u{1F917} Datasets "),Ue=p(Se,"A",{href:!0,rel:!0});var $a=f(Ue);zs=o($a,"map"),$a.forEach(s),Cs=o(Se," function to apply the preprocessing function over the entire dataset. You can speed up the "),$t=p(Se,"CODE",{});var wa=f($t);Ps=o(wa,"map"),wa.forEach(s),Fs=o(Se," function by setting "),wt=p(Se,"CODE",{});var ja=f(wt);Ms=o(ja,"batched=True"),ja.forEach(s),Ss=o(Se," to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),Se.forEach(s),Mt=g(e),y(He.$$.fragment,e),St=g(e),ve=p(e,"P",{});var nt=f(ve);Ls=o(nt,"Use "),Xe=p(nt,"A",{href:!0});var va=f(Xe);Qs=o(va,"DefaultDataCollator"),va.forEach(s),Os=o(nt," to create a batch of examples. 
Unlike other data collators in \u{1F917} Transformers, the "),jt=p(nt,"CODE",{});var xa=f(jt);Is=o(xa,"DefaultDataCollator"),xa.forEach(s),Bs=o(nt," does not apply additional preprocessing such as padding."),nt.forEach(s),Lt=g(e),y(De.$$.fragment,e),Qt=g(e),be=p(e,"H2",{class:!0});var Vt=f(be);ze=p(Vt,"A",{id:!0,class:!0,href:!0});var ka=f(ze);vt=p(ka,"SPAN",{});var ba=f(vt);y(Ve.$$.fragment,ba),ba.forEach(s),ka.forEach(s),Ns=g(Vt),xt=p(Vt,"SPAN",{});var qa=f(xt);Rs=o(qa,"Train"),qa.forEach(s),Vt.forEach(s),Ot=g(e),y(Ce.$$.fragment,e),It=g(e),y(Pe.$$.fragment,e),this.h()},h(){w(a,"name","hf:doc:metadata"),w(a,"content",JSON.stringify(Ya)),w(d,"id","question-answering"),w(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(d,"href","#question-answering"),w(n,"class","relative group"),w(B,"href","https://huggingface.co/distilbert-base-uncased"),w(B,"rel","nofollow"),w(Z,"href","https://huggingface.co/datasets/squad"),w(Z,"rel","nofollow"),w(m,"id","load-squad-dataset"),w(m,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(m,"href","#load-squad-dataset"),w(S,"class","relative group"),w(Ae,"id","preprocess"),w(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(Ae,"href","#preprocess"),w(xe,"class","relative group"),w(Ne,"href","https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids"),w(Ne,"rel","nofollow"),w(Ue,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),w(Ue,"rel","nofollow"),w(Xe,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DefaultDataCollator"),w(ze,"id","train"),w(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(ze,"href","#train"),w(be,"class","relative 
group")},m(e,h){t(document.head,a),c(e,u,h),c(e,n,h),t(n,d),t(d,j),E($,j,null),t(n,v),t(n,C),t(C,z),c(e,k,h),E(P,e,h),c(e,M,h),c(e,J,h),t(J,L),c(e,K,h),c(e,H,h),t(H,O),t(O,le),t(H,W),t(H,re),t(re,N),c(e,ie,h),c(e,Q,h),t(Q,pe),t(Q,B),t(B,fe),t(Q,I),t(Q,Z),t(Z,R),t(Q,oe),c(e,ce,h),E(V,e,h),c(e,Y,h),c(e,S,h),t(S,m),t(m,b),E(G,b,null),t(S,te),t(S,U),t(U,he),c(e,_e,h),c(e,X,h),t(X,se),c(e,ae,h),E(l,e,h),c(e,x,h),c(e,me,h),t(me,Ge),c(e,Le,h),E(ue,e,h),c(e,Qe,h),c(e,ee,h),t(ee,Je),t(ee,qe),t(qe,ye),t(ee,Ke),t(ee,Ee),t(Ee,Kt),t(ee,Wt),c(e,yt,h),c(e,xe,h),t(xe,Ae),t(Ae,rt),E(Oe,rt,null),t(xe,Xt),t(xe,ot),t(ot,Zt),c(e,Et,h),E(Ie,e,h),c(e,At,h),c(e,we,h),t(we,es),t(we,lt),t(lt,ts),t(we,ss),t(we,it),t(it,as),t(we,ns),c(e,Tt,h),E(Be,e,h),c(e,Dt,h),c(e,We,h),t(We,rs),c(e,zt,h),c(e,je,h),t(je,ge),t(ge,os),t(ge,pt),t(pt,ls),t(ge,is),t(ge,ft),t(ft,ps),t(ge,fs),t(ge,ct),t(ct,cs),t(ge,hs),t(je,ms),t(je,ke),t(ke,us),t(ke,ht),t(ht,ds),t(ke,_s),t(ke,mt),t(mt,gs),t(ke,$s),t(je,ws),t(je,$e),t($e,js),t($e,Ne),t(Ne,ut),t(ut,vs),t($e,xs),t($e,dt),t(dt,ks),t($e,bs),t($e,_t),t(_t,qs),t($e,ys),c(e,Ct,h),c(e,Te,h),t(Te,Es),t(Te,gt),t(gt,As),t(Te,Ts),c(e,Pt,h),E(Re,e,h),c(e,Ft,h),c(e,de,h),t(de,Ds),t(de,Ue),t(Ue,zs),t(de,Cs),t(de,$t),t($t,Ps),t(de,Fs),t(de,wt),t(wt,Ms),t(de,Ss),c(e,Mt,h),E(He,e,h),c(e,St,h),c(e,ve,h),t(ve,Ls),t(ve,Xe),t(Xe,Qs),t(ve,Os),t(ve,jt),t(jt,Is),t(ve,Bs),c(e,Lt,h),E(De,e,h),c(e,Qt,h),c(e,be,h),t(be,ze),t(ze,vt),E(Ve,vt,null),t(be,Ns),t(be,xt),t(xt,Rs),c(e,Ot,h),E(Ce,e,h),c(e,It,h),E(Pe,e,h),Bt=!0},p(e,[h]){const Ye={};h&2&&(Ye.$$scope={dirty:h,ctx:e}),V.$set(Ye);const kt={};h&2&&(kt.$$scope={dirty:h,ctx:e}),De.$set(kt);const bt={};h&2&&(bt.$$scope={dirty:h,ctx:e}),Ce.$set(bt);const qt={};h&2&&(qt.$$scope={dirty:h,ctx:e}),Pe.$set(qt)},i(e){Bt||(A($.$$.fragment,e),A(P.$$.fragment,e),A(V.$$.fragment,e),A(G.$$.fragment,e),A(l.$$.fragment,e),A(ue.$$.fragment,e),A(Oe.$$.fragment,e),A(Ie.$$.fragment,e),A(Be.$$.fragment,e),A(Re.$$.fragment,e),A(He.$$.fragment,e),A(De.$$.fragment,e),A(Ve.$$.fragment,e),A(Ce.$$.fragment,e),A(Pe.$$.fragment,e),Bt=!0)},o(e){T($.$$.fragment,e),T(P.$$.fragment,e),T(V.$$.fragment,e),T(G.$$.fragment,e),T(l.$$.fragment,e),T(ue.$$.fragment,e),T(Oe.$$.fragment,e),T(Ie.$$.fragment,e),T(Be.$$.fragment,e),T(Re.$$.fragment,e),T(He.$$.fragment,e),T(De.$$.fragment,e),T(Ve.$$.fragment,e),T(Ce.$$.fragment,e),T(Pe.$$.fragment,e),Bt=!1},d(e){s(a),e&&s(u),e&&s(n),D($),e&&s(k),D(P,e),e&&s(M),e&&s(J),e&&s(K),e&&s(H),e&&s(ie),e&&s(Q),e&&s(ce),D(V,e),e&&s(Y),e&&s(S),D(G),e&&s(_e),e&&s(X),e&&s(ae),D(l,e),e&&s(x),e&&s(me),e&&s(Le),D(ue,e),e&&s(Qe),e&&s(ee),e&&s(yt),e&&s(xe),D(Oe),e&&s(Et),D(Ie,e),e&&s(At),e&&s(we),e&&s(Tt),D(Be,e),e&&s(Dt),e&&s(We),e&&s(zt),e&&s(je),e&&s(Ct),e&&s(Te),e&&s(Pt),D(Re,e),e&&s(Ft),e&&s(de),e&&s(Mt),D(He,e),e&&s(St),e&&s(ve),e&&s(Lt),D(De,e),e&&s(Qt),e&&s(be),D(Ve),e&&s(Ot),D(Ce,e),e&&s(It),D(Pe,e)}}}const Ya={local:"question-answering",sections:[{local:"load-squad-dataset",title:"Load SQuAD dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Question answering"};function Ga(F){return Pa(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class tn extends Ta{constructor(a){super();Da(this,a,Ga,Va,za,{})}}export{tn as default,Ya as metadata};
28
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js
import{S as La,i as Oa,s as Wa,e as i,k as g,w as x,t as n,M as Ba,c as p,d as a,m as b,a as f,x as z,h as l,b as w,G as t,g as c,y as A,q,o as C,B as P,v as Na,L as Ma}from"../../chunks/vendor-hf-doc-builder.js";import{T as pt}from"../../chunks/Tip-hf-doc-builder.js";import{Y as Ra}from"../../chunks/Youtube-hf-doc-builder.js";import{I as At}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ie}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as Ia,M as qt}from"../../chunks/Markdown-hf-doc-builder.js";function Ua(S){let s,m,r,u,$;return{c(){s=i("p"),m=n("See the text classification "),r=i("a"),u=n("task page"),$=n(" for more information about other forms of text classification and their associated models, datasets, and metrics."),this.h()},l(_){s=p(_,"P",{});var v=f(s);m=l(v,"See the text classification "),r=p(v,"A",{href:!0,rel:!0});var T=f(r);u=l(T,"task page"),T.forEach(a),$=l(v," for more information about other forms of text classification and their associated models, datasets, and metrics."),v.forEach(a),this.h()},h(){w(r,"href","https://huggingface.co/tasks/text-classification"),w(r,"rel","nofollow")},m(_,v){c(_,s,v),t(s,m),t(s,r),t(r,u),t(s,$)},d(_){_&&a(s)}}}function Ga(S){let s,m;return s=new ie({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p:Ma,i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function Ha(S){let s,m;return s=new qt({props:{$$slots:{default:[Ga]},$$scope:{ctx:S}}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p(r,u){const $={};u&2&&($.$$scope={dirty:u,ctx:r}),s.$set($)},i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function Ya(S){let s,m;return s=new ie({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p:Ma,i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function Ka(S){let s,m;return s=new qt({props:{$$slots:{default:[Ya]},$$scope:{ctx:S}}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p(r,u){const $={};u&2&&($.$$scope={dirty:u,ctx:r}),s.$set($)},i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function Va(S){let s,m,r,u,$,_,v,T;return{c(){s=i("p"),m=n("If you aren\u2019t familiar with fine-tuning a model with the "),r=i("a"),u=n("Trainer"),$=n(", take a look at the basic tutorial "),_=i("a"),v=n("here"),T=n("!"),this.h()},l(E){s=p(E,"P",{});var j=f(s);m=l(j,"If you aren\u2019t familiar with fine-tuning a model with the "),r=p(j,"A",{href:!0});var D=f(r);u=l(D,"Trainer"),D.forEach(a),$=l(j,", take a look at the basic tutorial 
"),_=p(j,"A",{href:!0});var I=f(_);v=l(I,"here"),I.forEach(a),T=l(j,"!"),j.forEach(a),this.h()},h(){w(r,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),w(_,"href","../training#finetune-with-trainer")},m(E,j){c(E,s,j),t(s,m),t(s,r),t(r,u),t(s,$),t(s,_),t(_,v),t(s,T)},d(E){E&&a(s)}}}function Ja(S){let s,m,r,u,$,_,v;return{c(){s=i("p"),m=i("a"),r=n("Trainer"),u=n(" will apply dynamic padding by default when you pass "),$=i("code"),_=n("tokenizer"),v=n(" to it. In this case, you don\u2019t need to specify a data collator explicitly."),this.h()},l(T){s=p(T,"P",{});var E=f(s);m=p(E,"A",{href:!0});var j=f(m);r=l(j,"Trainer"),j.forEach(a),u=l(E," will apply dynamic padding by default when you pass "),$=p(E,"CODE",{});var D=f($);_=l(D,"tokenizer"),D.forEach(a),v=l(E," to it. In this case, you don\u2019t need to specify a data collator explicitly."),E.forEach(a),this.h()},h(){w(m,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer")},m(T,E){c(T,s,E),t(s,m),t(m,r),t(s,u),t(s,$),t($,_),t(s,v)},d(T){T&&a(s)}}}function Qa(S){let s,m,r,u,$,_,v,T,E,j,D,I,K,L,V,N,R,J,Q,ue,M,de,ae,pe,O,fe,F,X,W,G,_e,se,Z,H,B,Y;return v=new ie({props:{code:`from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),E=new pt({props:{$$slots:{default:[Va]},$$scope:{ctx:S}}}),Z=new ie({props:{code:`training_args = TrainingArguments( output_dir="./results", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_imdb["train"], eval_dataset=tokenized_imdb["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),B=new pt({props:{$$slots:{default:[Ja]},$$scope:{ctx:S}}}),{c(){s=i("p"),m=n("Load DistilBERT with "),r=i("a"),u=n("AutoModelForSequenceClassification"),$=n(" along with the number of expected labels:"),_=g(),x(v.$$.fragment),T=g(),x(E.$$.fragment),j=g(),D=i("p"),I=n("At this point, only three steps remain:"),K=g(),L=i("ol"),V=i("li"),N=n("Define your training hyperparameters in "),R=i("a"),J=n("TrainingArguments"),Q=n("."),ue=g(),M=i("li"),de=n("Pass the training arguments to "),ae=i("a"),pe=n("Trainer"),O=n(" along with the model, dataset, tokenizer, and data collator."),fe=g(),F=i("li"),X=n("Call "),W=i("a"),G=n("train()"),_e=n(" to fine-tune your model."),se=g(),x(Z.$$.fragment),H=g(),x(B.$$.fragment),this.h()},l(h){s=p(h,"P",{});var y=f(s);m=l(y,"Load DistilBERT with "),r=p(y,"A",{href:!0});var ee=f(r);u=l(ee,"AutoModelForSequenceClassification"),ee.forEach(a),$=l(y," along with the number of expected labels:"),y.forEach(a),_=b(h),z(v.$$.fragment,h),T=b(h),z(E.$$.fragment,h),j=b(h),D=p(h,"P",{});var te=f(D);I=l(te,"At this point, only three steps remain:"),te.forEach(a),K=b(h),L=p(h,"OL",{});var re=f(L);V=p(re,"LI",{});var oe=f(V);N=l(oe,"Define your training hyperparameters in "),R=p(oe,"A",{href:!0});var U=f(R);J=l(U,"TrainingArguments"),U.forEach(a),Q=l(oe,"."),oe.forEach(a),ue=b(re),M=p(re,"LI",{});var ne=f(M);de=l(ne,"Pass the training arguments to "),ae=p(ne,"A",{href:!0});var o=f(ae);pe=l(o,"Trainer"),o.forEach(a),O=l(ne," along with the model, dataset, tokenizer, and data collator."),ne.forEach(a),fe=b(re),F=p(re,"LI",{});var k=f(F);X=l(k,"Call "),W=p(k,"A",{href:!0});var ce=f(W);G=l(ce,"train()"),ce.forEach(a),_e=l(k," to fine-tune your model."),k.forEach(a),re.forEach(a),se=b(h),z(Z.$$.fragment,h),H=b(h),z(B.$$.fragment,h),this.h()},h(){w(r,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),w(R,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),w(ae,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),w(W,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(h,y){c(h,s,y),t(s,m),t(s,r),t(r,u),t(s,$),c(h,_,y),A(v,h,y),c(h,T,y),A(E,h,y),c(h,j,y),c(h,D,y),t(D,I),c(h,K,y),c(h,L,y),t(L,V),t(V,N),t(V,R),t(R,J),t(V,Q),t(L,ue),t(L,M),t(M,de),t(M,ae),t(ae,pe),t(M,O),t(L,fe),t(L,F),t(F,X),t(F,W),t(W,G),t(F,_e),c(h,se,y),A(Z,h,y),c(h,H,y),A(B,h,y),Y=!0},p(h,y){const ee={};y&2&&(ee.$$scope={dirty:y,ctx:h}),E.$set(ee);const te={};y&2&&(te.$$scope={dirty:y,ctx:h}),B.$set(te)},i(h){Y||(q(v.$$.fragment,h),q(E.$$.fragment,h),q(Z.$$.fragment,h),q(B.$$.fragment,h),Y=!0)},o(h){C(v.$$.fragment,h),C(E.$$.fragment,h),C(Z.$$.fragment,h),C(B.$$.fragment,h),Y=!1},d(h){h&&a(s),h&&a(_),P(v,h),h&&a(T),P(E,h),h&&a(j),h&&a(D),h&&a(K),h&&a(L),h&&a(se),P(Z,h),h&&a(H),P(B,h)}}}function Xa(S){let s,m;return s=new qt({props:{$$slots:{default:[Qa]},$$scope:{ctx:S}}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p(r,u){const $={};u&2&&($.$$scope={dirty:u,ctx:r}),s.$set($)},i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function Za(S){let s,m,r,u,$;return{c(){s=i("p"),m=n("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),r=i("a"),u=n("here"),$=n("!"),this.h()},l(_){s=p(_,"P",{});var v=f(s);m=l(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look 
at the basic tutorial "),r=p(v,"A",{href:!0});var T=f(r);u=l(T,"here"),T.forEach(a),$=l(v,"!"),v.forEach(a),this.h()},h(){w(r,"href","training#finetune-with-keras")},m(_,v){c(_,s,v),t(s,m),t(s,r),t(r,u),t(s,$)},d(_){_&&a(s)}}}function es(S){let s,m,r,u,$,_,v,T,E,j,D,I,K,L,V,N,R,J,Q,ue,M,de,ae,pe,O,fe,F,X,W,G,_e,se,Z,H,B,Y,h,y,ee,te,re,oe,U,ne;return j=new ie({props:{code:`tf_train_set = model.prepare_tf_dataset( tokenized_imdb["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = model.prepare_tf_dataset( tokenized_imdb["test"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),I=new pt({props:{$$slots:{default:[Za]},$$scope:{ctx:S}}}),R=new ie({props:{code:`from transformers import create_optimizer import tensorflow as tf batch_size = 16 num_epochs = 5 batches_per_epoch = len(tokenized_imdb["train"]) // batch_size total_train_steps = int(batches_per_epoch * num_epochs) optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)`}}),O=new ie({props:{code:`from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, 
num_labels=<span class="hljs-number">2</span>)`}}),H=new ie({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),U=new ie({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){s=i("p"),m=n("To fine-tune a model in TensorFlow, start by converting your datasets to the "),r=i("code"),u=n("tf.data.Dataset"),$=n(" format with "),_=i("a"),v=n("prepare_tf_dataset()"),T=n("."),E=g(),x(j.$$.fragment),D=g(),x(I.$$.fragment),K=g(),L=i("p"),V=n("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),N=g(),x(R.$$.fragment),J=g(),Q=i("p"),ue=n("Load DistilBERT with "),M=i("a"),de=n("TFAutoModelForSequenceClassification"),ae=n(" along with the number of expected labels:"),pe=g(),x(O.$$.fragment),fe=g(),F=i("p"),X=n("Configure the model for training with "),W=i("a"),G=i("code"),_e=n("compile"),se=n(":"),Z=g(),x(H.$$.fragment),B=g(),Y=i("p"),h=n("Call "),y=i("a"),ee=i("code"),te=n("fit"),re=n(" to fine-tune the model:"),oe=g(),x(U.$$.fragment),this.h()},l(o){s=p(o,"P",{});var k=f(s);m=l(k,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),r=p(k,"CODE",{});var ce=f(r);u=l(ce,"tf.data.Dataset"),ce.forEach(a),$=l(k," format with "),_=p(k,"A",{href:!0});var $e=f(_);v=l($e,"prepare_tf_dataset()"),$e.forEach(a),T=l(k,"."),k.forEach(a),E=b(o),z(j.$$.fragment,o),D=b(o),z(I.$$.fragment,o),K=b(o),L=p(o,"P",{});var ve=f(L);V=l(ve,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),ve.forEach(a),N=b(o),z(R.$$.fragment,o),J=b(o),Q=p(o,"P",{});var ge=f(Q);ue=l(ge,"Load DistilBERT with "),M=p(ge,"A",{href:!0});var Be=f(M);de=l(Be,"TFAutoModelForSequenceClassification"),Be.forEach(a),ae=l(ge," along with the number of expected labels:"),ge.forEach(a),pe=b(o),z(O.$$.fragment,o),fe=b(o),F=p(o,"P",{});var je=f(F);X=l(je,"Configure the model for training with "),W=p(je,"A",{href:!0,rel:!0});var Ne=f(W);G=p(Ne,"CODE",{});var me=f(G);_e=l(me,"compile"),me.forEach(a),Ne.forEach(a),se=l(je,":"),je.forEach(a),Z=b(o),z(H.$$.fragment,o),B=b(o),Y=p(o,"P",{});var be=f(Y);h=l(be,"Call "),y=p(be,"A",{href:!0,rel:!0});var Re=f(y);ee=p(Re,"CODE",{});var Ue=f(ee);te=l(Ue,"fit"),Ue.forEach(a),Re.forEach(a),re=l(be," to fine-tune the 
model:"),be.forEach(a),oe=b(o),z(U.$$.fragment,o),this.h()},h(){w(_,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),w(M,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification"),w(W,"href","https://keras.io/api/models/model_training_apis/#compile-method"),w(W,"rel","nofollow"),w(y,"href","https://keras.io/api/models/model_training_apis/#fit-method"),w(y,"rel","nofollow")},m(o,k){c(o,s,k),t(s,m),t(s,r),t(r,u),t(s,$),t(s,_),t(_,v),t(s,T),c(o,E,k),A(j,o,k),c(o,D,k),A(I,o,k),c(o,K,k),c(o,L,k),t(L,V),c(o,N,k),A(R,o,k),c(o,J,k),c(o,Q,k),t(Q,ue),t(Q,M),t(M,de),t(Q,ae),c(o,pe,k),A(O,o,k),c(o,fe,k),c(o,F,k),t(F,X),t(F,W),t(W,G),t(G,_e),t(F,se),c(o,Z,k),A(H,o,k),c(o,B,k),c(o,Y,k),t(Y,h),t(Y,y),t(y,ee),t(ee,te),t(Y,re),c(o,oe,k),A(U,o,k),ne=!0},p(o,k){const ce={};k&2&&(ce.$$scope={dirty:k,ctx:o}),I.$set(ce)},i(o){ne||(q(j.$$.fragment,o),q(I.$$.fragment,o),q(R.$$.fragment,o),q(O.$$.fragment,o),q(H.$$.fragment,o),q(U.$$.fragment,o),ne=!0)},o(o){C(j.$$.fragment,o),C(I.$$.fragment,o),C(R.$$.fragment,o),C(O.$$.fragment,o),C(H.$$.fragment,o),C(U.$$.fragment,o),ne=!1},d(o){o&&a(s),o&&a(E),P(j,o),o&&a(D),P(I,o),o&&a(K),o&&a(L),o&&a(N),P(R,o),o&&a(J),o&&a(Q),o&&a(pe),P(O,o),o&&a(fe),o&&a(F),o&&a(Z),P(H,o),o&&a(B),o&&a(Y),o&&a(oe),P(U,o)}}}function ts(S){let s,m;return s=new qt({props:{$$slots:{default:[es]},$$scope:{ctx:S}}}),{c(){x(s.$$.fragment)},l(r){z(s.$$.fragment,r)},m(r,u){A(s,r,u),m=!0},p(r,u){const $={};u&2&&($.$$scope={dirty:u,ctx:r}),s.$set($)},i(r){m||(q(s.$$.fragment,r),m=!0)},o(r){C(s.$$.fragment,r),m=!1},d(r){P(s,r)}}}function as(S){let s,m,r,u,$,_,v,T;return{c(){s=i("p"),m=n(`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),r=i("a"),u=n("PyTorch notebook"),$=n(` or `),_=i("a"),v=n("TensorFlow notebook"),T=n("."),this.h()},l(E){s=p(E,"P",{});var j=f(s);m=l(j,`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),r=p(j,"A",{href:!0,rel:!0});var D=f(r);u=l(D,"PyTorch notebook"),D.forEach(a),$=l(j,` or `),_=p(j,"A",{href:!0,rel:!0});var I=f(_);v=l(I,"TensorFlow notebook"),I.forEach(a),T=l(j,"."),j.forEach(a),this.h()},h(){w(r,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb"),w(r,"rel","nofollow"),w(_,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb"),w(_,"rel","nofollow")},m(E,j){c(E,s,j),t(s,m),t(s,r),t(r,u),t(s,$),t(s,_),t(_,v),t(s,T)},d(E){E&&a(s)}}}function ss(S){let s,m,r,u,$,_,v,T,E,j,D,I,K,L,V,N,R,J,Q,ue,M,de,ae,pe,O,fe,F,X,W,G,_e,se,Z,H,B,Y,h,y,ee,te,re,oe,U,ne,o,k,ce,$e,ve,ge,Be,je,Ne,me,be,Re,Ue,Ye,Ct,Pt,Ke,Dt,St,ft,ke,Ee,Ve,De,Ft,Je,It,ct,Te,Mt,Qe,Lt,Ot,mt,Se,ht,xe,Wt,Xe,Bt,Nt,ut,Fe,dt,he,Rt,Ie,Ut,Gt,Ze,Ht,Yt,et,Kt,Vt,_t,Me,$t,le,Jt,Ge,Qt,Xt,tt,Zt,ea,at,ta,aa,st,sa,ra,gt,ze,bt,ye,Ae,rt,Le,oa,ot,na,wt,qe,vt,Ce,kt;return _=new At({}),D=new Ra({props:{id:"leNG9fN9FQU"}}),O=new pt({props:{$$slots:{default:[Ua]},$$scope:{ctx:S}}}),G=new At({}),y=new ie({props:{code:`from datasets import load_dataset imdb = load_dataset("imdb")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)`}}),U=new 
ie({props:{code:'imdb["test"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>imdb[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn&#x27;t match the background, and painfully one-dimensional characters cannot be overcome with a &#x27;sci-fi&#x27; setting. (I&#x27;m sure there are those of you out there who think Babylon 5 is good sci-fi TV. It&#x27;s not. It&#x27;s clich\xE9d and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It&#x27;s really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it&#x27;s rubbish as they have to always say \\&quot;Gene Roddenberry&#x27;s Earth...\\&quot; otherwise people would not continue watching. Roddenberry&#x27;s ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.&quot;</span>, }`}}),De=new At({}),Se=new ie({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Fe=new ie({props:{code:`def preprocess_function(examples): return tokenizer(examples["text"], truncation=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)`}}),Me=new ie({props:{code:"tokenized_imdb = imdb.map(preprocess_function, batched=True)",highlighted:'tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),ze=new Ia({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Ka],pytorch:[Ha]},$$scope:{ctx:S}}}),Le=new At({}),qe=new Ia({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[ts],pytorch:[Xa]},$$scope:{ctx:S}}}),Ce=new pt({props:{$$slots:{default:[as]},$$scope:{ctx:S}}}),{c(){s=i("meta"),m=g(),r=i("h1"),u=i("a"),$=i("span"),x(_.$$.fragment),v=g(),T=i("span"),E=n("Text classification"),j=g(),x(D.$$.fragment),I=g(),K=i("p"),L=n("Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today\u2019s largest companies. One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text."),V=g(),N=i("p"),R=n("This guide will show you how to fine-tune "),J=i("a"),Q=n("DistilBERT"),ue=n(" on the "),M=i("a"),de=n("IMDb"),ae=n(" dataset to determine whether a movie review is positive or negative."),pe=g(),x(O.$$.fragment),fe=g(),F=i("h2"),X=i("a"),W=i("span"),x(G.$$.fragment),_e=g(),se=i("span"),Z=n("Load IMDb dataset"),H=g(),B=i("p"),Y=n("Load the IMDb dataset from the \u{1F917} Datasets library:"),h=g(),x(y.$$.fragment),ee=g(),te=i("p"),re=n("Then take a look at an example:"),oe=g(),x(U.$$.fragment),ne=g(),o=i("p"),k=n("There are two fields in this dataset:"),ce=g(),$e=i("ul"),ve=i("li"),ge=i("code"),Be=n("text"),je=n(": a string containing the text of the movie review."),Ne=g(),me=i("li"),be=i("code"),Re=n("label"),Ue=n(": a value that can either be "),Ye=i("code"),Ct=n("0"),Pt=n(" for a negative review or "),Ke=i("code"),Dt=n("1"),St=n(" for a positive review."),ft=g(),ke=i("h2"),Ee=i("a"),Ve=i("span"),x(De.$$.fragment),Ft=g(),Je=i("span"),It=n("Preprocess"),ct=g(),Te=i("p"),Mt=n("Load the DistilBERT tokenizer to process the "),Qe=i("code"),Lt=n("text"),Ot=n(" field:"),mt=g(),x(Se.$$.fragment),ht=g(),xe=i("p"),Wt=n("Create a preprocessing function to tokenize "),Xe=i("code"),Bt=n("text"),Nt=n(" and truncate sequences to be no longer than DistilBERT\u2019s maximum input length:"),ut=g(),x(Fe.$$.fragment),dt=g(),he=i("p"),Rt=n("Use \u{1F917} Datasets "),Ie=i("a"),Ut=n("map"),Gt=n(" function to apply the preprocessing function over the entire dataset. You can speed up the "),Ze=i("code"),Ht=n("map"),Yt=n(" function by setting "),et=i("code"),Kt=n("batched=True"),Vt=n(" to process multiple elements of the dataset at once:"),_t=g(),x(Me.$$.fragment),$t=g(),le=i("p"),Jt=n("Use "),Ge=i("a"),Qt=n("DataCollatorWithPadding"),Xt=n(" to create a batch of examples. It will also "),tt=i("em"),Zt=n("dynamically pad"),ea=n(" your text to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),at=i("code"),ta=n("tokenizer"),aa=n(" function by setting "),st=i("code"),sa=n("padding=True"),ra=n(", dynamic padding is more efficient."),gt=g(),x(ze.$$.fragment),bt=g(),ye=i("h2"),Ae=i("a"),rt=i("span"),x(Le.$$.fragment),oa=g(),ot=i("span"),na=n("Train"),wt=g(),x(qe.$$.fragment),vt=g(),x(Ce.$$.fragment),this.h()},l(e){const d=Ba('[data-svelte="svelte-1phssyn"]',document.head);s=p(d,"META",{name:!0,content:!0}),d.forEach(a),m=b(e),r=p(e,"H1",{class:!0});var Oe=f(r);u=p(Oe,"A",{id:!0,class:!0,href:!0});var nt=f(u);$=p(nt,"SPAN",{});var lt=f($);z(_.$$.fragment,lt),lt.forEach(a),nt.forEach(a),v=b(Oe),T=p(Oe,"SPAN",{});var it=f(T);E=l(it,"Text classification"),it.forEach(a),Oe.forEach(a),j=b(e),z(D.$$.fragment,e),I=b(e),K=p(e,"P",{});var ia=f(K);L=l(ia,"Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today\u2019s largest companies. One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text."),ia.forEach(a),V=b(e),N=p(e,"P",{});var He=f(N);R=l(He,"This guide will show you how to fine-tune "),J=p(He,"A",{href:!0,rel:!0});var pa=f(J);Q=l(pa,"DistilBERT"),pa.forEach(a),ue=l(He," on the "),M=p(He,"A",{href:!0,rel:!0});var fa=f(M);de=l(fa,"IMDb"),fa.forEach(a),ae=l(He," dataset to determine whether a movie review is positive or negative."),He.forEach(a),pe=b(e),z(O.$$.fragment,e),fe=b(e),F=p(e,"H2",{class:!0});var yt=f(F);X=p(yt,"A",{id:!0,class:!0,href:!0});var ca=f(X);W=p(ca,"SPAN",{});var ma=f(W);z(G.$$.fragment,ma),ma.forEach(a),ca.forEach(a),_e=b(yt),se=p(yt,"SPAN",{});var ha=f(se);Z=l(ha,"Load IMDb dataset"),ha.forEach(a),yt.forEach(a),H=b(e),B=p(e,"P",{});var ua=f(B);Y=l(ua,"Load the IMDb dataset from the \u{1F917} Datasets library:"),ua.forEach(a),h=b(e),z(y.$$.fragment,e),ee=b(e),te=p(e,"P",{});var da=f(te);re=l(da,"Then take a look at an example:"),da.forEach(a),oe=b(e),z(U.$$.fragment,e),ne=b(e),o=p(e,"P",{});var _a=f(o);k=l(_a,"There are two fields in this dataset:"),_a.forEach(a),ce=b(e),$e=p(e,"UL",{});var jt=f($e);ve=p(jt,"LI",{});var la=f(ve);ge=p(la,"CODE",{});var $a=f(ge);Be=l($a,"text"),$a.forEach(a),je=l(la,": a string containing the text of the movie review."),la.forEach(a),Ne=b(jt),me=p(jt,"LI",{});var We=f(me);be=p(We,"CODE",{});var ga=f(be);Re=l(ga,"label"),ga.forEach(a),Ue=l(We,": a value that can either be "),Ye=p(We,"CODE",{});var ba=f(Ye);Ct=l(ba,"0"),ba.forEach(a),Pt=l(We," for a negative review or "),Ke=p(We,"CODE",{});var wa=f(Ke);Dt=l(wa,"1"),wa.forEach(a),St=l(We," for a positive review."),We.forEach(a),jt.forEach(a),ft=b(e),ke=p(e,"H2",{class:!0});var Et=f(ke);Ee=p(Et,"A",{id:!0,class:!0,href:!0});var va=f(Ee);Ve=p(va,"SPAN",{});var ka=f(Ve);z(De.$$.fragment,ka),ka.forEach(a),va.forEach(a),Ft=b(Et),Je=p(Et,"SPAN",{});var ya=f(Je);It=l(ya,"Preprocess"),ya.forEach(a),Et.forEach(a),ct=b(e),Te=p(e,"P",{});var Tt=f(Te);Mt=l(Tt,"Load the DistilBERT tokenizer to process the "),Qe=p(Tt,"CODE",{});var ja=f(Qe);Lt=l(ja,"text"),ja.forEach(a),Ot=l(Tt," field:"),Tt.forEach(a),mt=b(e),z(Se.$$.fragment,e),ht=b(e),xe=p(e,"P",{});var xt=f(xe);Wt=l(xt,"Create a preprocessing function to tokenize "),Xe=p(xt,"CODE",{});var Ea=f(Xe);Bt=l(Ea,"text"),Ea.forEach(a),Nt=l(xt," and truncate sequences to be no longer than DistilBERT\u2019s maximum input 
length:"),xt.forEach(a),ut=b(e),z(Fe.$$.fragment,e),dt=b(e),he=p(e,"P",{});var Pe=f(he);Rt=l(Pe,"Use \u{1F917} Datasets "),Ie=p(Pe,"A",{href:!0,rel:!0});var Ta=f(Ie);Ut=l(Ta,"map"),Ta.forEach(a),Gt=l(Pe," function to apply the preprocessing function over the entire dataset. You can speed up the "),Ze=p(Pe,"CODE",{});var xa=f(Ze);Ht=l(xa,"map"),xa.forEach(a),Yt=l(Pe," function by setting "),et=p(Pe,"CODE",{});var za=f(et);Kt=l(za,"batched=True"),za.forEach(a),Vt=l(Pe," to process multiple elements of the dataset at once:"),Pe.forEach(a),_t=b(e),z(Me.$$.fragment,e),$t=b(e),le=p(e,"P",{});var we=f(le);Jt=l(we,"Use "),Ge=p(we,"A",{href:!0});var Aa=f(Ge);Qt=l(Aa,"DataCollatorWithPadding"),Aa.forEach(a),Xt=l(we," to create a batch of examples. It will also "),tt=p(we,"EM",{});var qa=f(tt);Zt=l(qa,"dynamically pad"),qa.forEach(a),ea=l(we," your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),at=p(we,"CODE",{});var Ca=f(at);ta=l(Ca,"tokenizer"),Ca.forEach(a),aa=l(we," function by setting "),st=p(we,"CODE",{});var Pa=f(st);sa=l(Pa,"padding=True"),Pa.forEach(a),ra=l(we,", dynamic padding is more efficient."),we.forEach(a),gt=b(e),z(ze.$$.fragment,e),bt=b(e),ye=p(e,"H2",{class:!0});var zt=f(ye);Ae=p(zt,"A",{id:!0,class:!0,href:!0});var Da=f(Ae);rt=p(Da,"SPAN",{});var Sa=f(rt);z(Le.$$.fragment,Sa),Sa.forEach(a),Da.forEach(a),oa=b(zt),ot=p(zt,"SPAN",{});var Fa=f(ot);na=l(Fa,"Train"),Fa.forEach(a),zt.forEach(a),wt=b(e),z(qe.$$.fragment,e),vt=b(e),z(Ce.$$.fragment,e),this.h()},h(){w(s,"name","hf:doc:metadata"),w(s,"content",JSON.stringify(rs)),w(u,"id","text-classification"),w(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(u,"href","#text-classification"),w(r,"class","relative group"),w(J,"href","https://huggingface.co/distilbert-base-uncased"),w(J,"rel","nofollow"),w(M,"href","https://huggingface.co/datasets/imdb"),w(M,"rel","nofollow"),w(X,"id","load-imdb-dataset"),w(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(X,"href","#load-imdb-dataset"),w(F,"class","relative group"),w(Ee,"id","preprocess"),w(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(Ee,"href","#preprocess"),w(ke,"class","relative group"),w(Ie,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),w(Ie,"rel","nofollow"),w(Ge,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),w(Ae,"id","train"),w(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(Ae,"href","#train"),w(ye,"class","relative 
group")},m(e,d){t(document.head,s),c(e,m,d),c(e,r,d),t(r,u),t(u,$),A(_,$,null),t(r,v),t(r,T),t(T,E),c(e,j,d),A(D,e,d),c(e,I,d),c(e,K,d),t(K,L),c(e,V,d),c(e,N,d),t(N,R),t(N,J),t(J,Q),t(N,ue),t(N,M),t(M,de),t(N,ae),c(e,pe,d),A(O,e,d),c(e,fe,d),c(e,F,d),t(F,X),t(X,W),A(G,W,null),t(F,_e),t(F,se),t(se,Z),c(e,H,d),c(e,B,d),t(B,Y),c(e,h,d),A(y,e,d),c(e,ee,d),c(e,te,d),t(te,re),c(e,oe,d),A(U,e,d),c(e,ne,d),c(e,o,d),t(o,k),c(e,ce,d),c(e,$e,d),t($e,ve),t(ve,ge),t(ge,Be),t(ve,je),t($e,Ne),t($e,me),t(me,be),t(be,Re),t(me,Ue),t(me,Ye),t(Ye,Ct),t(me,Pt),t(me,Ke),t(Ke,Dt),t(me,St),c(e,ft,d),c(e,ke,d),t(ke,Ee),t(Ee,Ve),A(De,Ve,null),t(ke,Ft),t(ke,Je),t(Je,It),c(e,ct,d),c(e,Te,d),t(Te,Mt),t(Te,Qe),t(Qe,Lt),t(Te,Ot),c(e,mt,d),A(Se,e,d),c(e,ht,d),c(e,xe,d),t(xe,Wt),t(xe,Xe),t(Xe,Bt),t(xe,Nt),c(e,ut,d),A(Fe,e,d),c(e,dt,d),c(e,he,d),t(he,Rt),t(he,Ie),t(Ie,Ut),t(he,Gt),t(he,Ze),t(Ze,Ht),t(he,Yt),t(he,et),t(et,Kt),t(he,Vt),c(e,_t,d),A(Me,e,d),c(e,$t,d),c(e,le,d),t(le,Jt),t(le,Ge),t(Ge,Qt),t(le,Xt),t(le,tt),t(tt,Zt),t(le,ea),t(le,at),t(at,ta),t(le,aa),t(le,st),t(st,sa),t(le,ra),c(e,gt,d),A(ze,e,d),c(e,bt,d),c(e,ye,d),t(ye,Ae),t(Ae,rt),A(Le,rt,null),t(ye,oa),t(ye,ot),t(ot,na),c(e,wt,d),A(qe,e,d),c(e,vt,d),A(Ce,e,d),kt=!0},p(e,[d]){const Oe={};d&2&&(Oe.$$scope={dirty:d,ctx:e}),O.$set(Oe);const nt={};d&2&&(nt.$$scope={dirty:d,ctx:e}),ze.$set(nt);const lt={};d&2&&(lt.$$scope={dirty:d,ctx:e}),qe.$set(lt);const it={};d&2&&(it.$$scope={dirty:d,ctx:e}),Ce.$set(it)},i(e){kt||(q(_.$$.fragment,e),q(D.$$.fragment,e),q(O.$$.fragment,e),q(G.$$.fragment,e),q(y.$$.fragment,e),q(U.$$.fragment,e),q(De.$$.fragment,e),q(Se.$$.fragment,e),q(Fe.$$.fragment,e),q(Me.$$.fragment,e),q(ze.$$.fragment,e),q(Le.$$.fragment,e),q(qe.$$.fragment,e),q(Ce.$$.fragment,e),kt=!0)},o(e){C(_.$$.fragment,e),C(D.$$.fragment,e),C(O.$$.fragment,e),C(G.$$.fragment,e),C(y.$$.fragment,e),C(U.$$.fragment,e),C(De.$$.fragment,e),C(Se.$$.fragment,e),C(Fe.$$.fragment,e),C(Me.$$.fragment,e),C(ze.$$.fragment,e),C(Le.$$.fragment,e),C(qe.$$.fragment,e),C(Ce.$$.fragment,e),kt=!1},d(e){a(s),e&&a(m),e&&a(r),P(_),e&&a(j),P(D,e),e&&a(I),e&&a(K),e&&a(V),e&&a(N),e&&a(pe),P(O,e),e&&a(fe),e&&a(F),P(G),e&&a(H),e&&a(B),e&&a(h),P(y,e),e&&a(ee),e&&a(te),e&&a(oe),P(U,e),e&&a(ne),e&&a(o),e&&a(ce),e&&a($e),e&&a(ft),e&&a(ke),P(De),e&&a(ct),e&&a(Te),e&&a(mt),P(Se,e),e&&a(ht),e&&a(xe),e&&a(ut),P(Fe,e),e&&a(dt),e&&a(he),e&&a(_t),P(Me,e),e&&a($t),e&&a(le),e&&a(gt),P(ze,e),e&&a(bt),e&&a(ye),P(Le),e&&a(wt),P(qe,e),e&&a(vt),P(Ce,e)}}}const rs={local:"text-classification",sections:[{local:"load-imdb-dataset",title:"Load IMDb dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Text classification"};function os(S){return Na(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ms extends La{constructor(s){super();Oa(this,s,os,ss,Wa,{})}}export{ms as default,rs as metadata};
29
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/language_modeling.mdx-hf-doc-builder.js
import{S as hn,i as un,s as cn,e as i,k as c,w as q,t as l,M as dn,c as p,d as a,m as d,a as f,x as D,h as n,b as g,G as t,g as m,y as z,q as C,o as L,B as P,v as gn,L as mn}from"../../chunks/vendor-hf-doc-builder.js";import{T as Tt}from"../../chunks/Tip-hf-doc-builder.js";import{Y as Xa}from"../../chunks/Youtube-hf-doc-builder.js";import{I as kt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ae}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as ml,M as At}from"../../chunks/Markdown-hf-doc-builder.js";function _n(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$;return{c(){s=i("p"),w=l("You can fine-tune other architectures for language modeling such as "),o=i("a"),_=l("GPT-Neo"),b=l(", "),k=i("a"),v=l("GPT-J"),F=l(", and "),j=i("a"),x=l("BERT"),M=l(", following the same steps presented in this guide!"),I=c(),G=i("p"),A=l("See the text generation "),N=i("a"),J=l("task page"),O=l(" and fill mask "),S=i("a"),R=l("task page"),$=l(" for more information about their associated models, datasets, and metrics."),this.h()},l(y){s=p(y,"P",{});var Y=f(s);w=n(Y,"You can fine-tune other architectures for language modeling such as "),o=p(Y,"A",{href:!0,rel:!0});var B=f(o);_=n(B,"GPT-Neo"),B.forEach(a),b=n(Y,", "),k=p(Y,"A",{href:!0,rel:!0});var W=f(k);v=n(W,"GPT-J"),W.forEach(a),F=n(Y,", and "),j=p(Y,"A",{href:!0,rel:!0});var K=f(j);x=n(K,"BERT"),K.forEach(a),M=n(Y,", following the same steps presented in this guide!"),Y.forEach(a),I=d(y),G=p(y,"P",{});var X=f(G);A=n(X,"See the text generation "),N=p(X,"A",{href:!0,rel:!0});var H=f(N);J=n(H,"task page"),H.forEach(a),O=n(X," and fill mask "),S=p(X,"A",{href:!0,rel:!0});var fe=f(S);R=n(fe,"task page"),fe.forEach(a),$=n(X," for more information about their associated models, datasets, and metrics."),X.forEach(a),this.h()},h(){g(o,"href","https://huggingface.co/EleutherAI/gpt-neo-125M"),g(o,"rel","nofollow"),g(k,"href","https://huggingface.co/EleutherAI/gpt-j-6B"),g(k,"rel","nofollow"),g(j,"href","https://huggingface.co/bert-base-uncased"),g(j,"rel","nofollow"),g(N,"href","https://huggingface.co/tasks/text-generation"),g(N,"rel","nofollow"),g(S,"href","https://huggingface.co/tasks/fill-mask"),g(S,"rel","nofollow")},m(y,Y){m(y,s,Y),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F),t(s,j),t(j,x),t(s,M),m(y,I,Y),m(y,G,Y),t(G,A),t(G,N),t(N,J),t(G,O),t(G,S),t(S,R),t(G,$)},d(y){y&&a(s),y&&a(I),y&&a(G)}}}function $n(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R;return v=new ae({props:{code:`from transformers import DataCollatorForLanguageModeling tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>)`}}),S=new ae({props:{code:`from transformers import DataCollatorForLanguageModeling tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = 
tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=<span class="hljs-number">0.15</span>)`}}),{c(){s=i("p"),w=l("You can use the end of sequence token as the padding token, and set "),o=i("code"),_=l("mlm=False"),b=l(". This will use the inputs as labels shifted to the right by one element:"),k=c(),q(v.$$.fragment),F=c(),j=i("p"),x=l("For masked language modeling, use the same "),M=i("a"),I=l("DataCollatorForLanguageModeling"),G=l(" except you should specify "),A=i("code"),N=l("mlm_probability"),J=l(" to randomly mask tokens each time you iterate over the data."),O=c(),q(S.$$.fragment),this.h()},l($){s=p($,"P",{});var y=f(s);w=n(y,"You can use the end of sequence token as the padding token, and set "),o=p(y,"CODE",{});var Y=f(o);_=n(Y,"mlm=False"),Y.forEach(a),b=n(y,". This will use the inputs as labels shifted to the right by one element:"),y.forEach(a),k=d($),D(v.$$.fragment,$),F=d($),j=p($,"P",{});var B=f(j);x=n(B,"For masked language modeling, use the same "),M=p(B,"A",{href:!0});var W=f(M);I=n(W,"DataCollatorForLanguageModeling"),W.forEach(a),G=n(B," except you should specify "),A=p(B,"CODE",{});var K=f(A);N=n(K,"mlm_probability"),K.forEach(a),J=n(B," to randomly mask tokens each time you iterate over the data."),B.forEach(a),O=d($),D(S.$$.fragment,$),this.h()},h(){g(M,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling")},m($,y){m($,s,y),t(s,w),t(s,o),t(o,_),t(s,b),m($,k,y),z(v,$,y),m($,F,y),m($,j,y),t(j,x),t(j,M),t(M,I),t(j,G),t(j,A),t(A,N),t(j,J),m($,O,y),z(S,$,y),R=!0},p:mn,i($){R||(C(v.$$.fragment,$),C(S.$$.fragment,$),R=!0)},o($){L(v.$$.fragment,$),L(S.$$.fragment,$),R=!1},d($){$&&a(s),$&&a(k),P(v,$),$&&a(F),$&&a(j),$&&a(O),P(S,$)}}}function kn(Q){let s,w;return s=new At({props:{$$slots:{default:[$n]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function wn(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R;return v=new ae({props:{code:`from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),S=new ae({props:{code:`from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){s=i("p"),w=l("You can use the end of sequence token as the padding token, and set "),o=i("code"),_=l("mlm=False"),b=l(". 
This will use the inputs as labels shifted to the right by one element:"),k=c(),q(v.$$.fragment),F=c(),j=i("p"),x=l("For masked language modeling, use the same "),M=i("a"),I=l("DataCollatorForLanguageModeling"),G=l(" except you should specify "),A=i("code"),N=l("mlm_probability"),J=l(" to randomly mask tokens each time you iterate over the data."),O=c(),q(S.$$.fragment),this.h()},l($){s=p($,"P",{});var y=f(s);w=n(y,"You can use the end of sequence token as the padding token, and set "),o=p(y,"CODE",{});var Y=f(o);_=n(Y,"mlm=False"),Y.forEach(a),b=n(y,". This will use the inputs as labels shifted to the right by one element:"),y.forEach(a),k=d($),D(v.$$.fragment,$),F=d($),j=p($,"P",{});var B=f(j);x=n(B,"For masked language modeling, use the same "),M=p(B,"A",{href:!0});var W=f(M);I=n(W,"DataCollatorForLanguageModeling"),W.forEach(a),G=n(B," except you should specify "),A=p(B,"CODE",{});var K=f(A);N=n(K,"mlm_probability"),K.forEach(a),J=n(B," to randomly mask tokens each time you iterate over the data."),B.forEach(a),O=d($),D(S.$$.fragment,$),this.h()},h(){g(M,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling")},m($,y){m($,s,y),t(s,w),t(s,o),t(o,_),t(s,b),m($,k,y),z(v,$,y),m($,F,y),m($,j,y),t(j,x),t(j,M),t(M,I),t(j,G),t(j,A),t(A,N),t(j,J),m($,O,y),z(S,$,y),R=!0},p:mn,i($){R||(C(v.$$.fragment,$),C(S.$$.fragment,$),R=!0)},o($){L(v.$$.fragment,$),L(S.$$.fragment,$),R=!1},d($){$&&a(s),$&&a(k),P(v,$),$&&a(F),$&&a(j),$&&a(O),P(S,$)}}}function jn(Q){let s,w;return s=new At({props:{$$slots:{default:[wn]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function yn(Q){let s,w,o,_,b,k,v,F;return{c(){s=i("p"),w=l("If you aren\u2019t familiar with fine-tuning a model with the "),o=i("a"),_=l("Trainer"),b=l(", take a look at the basic tutorial "),k=i("a"),v=l("here"),F=l("!"),this.h()},l(j){s=p(j,"P",{});var x=f(s);w=n(x,"If you aren\u2019t familiar with fine-tuning a model with the "),o=p(x,"A",{href:!0});var M=f(o);_=n(M,"Trainer"),M.forEach(a),b=n(x,", take a look at the basic tutorial "),k=p(x,"A",{href:!0});var I=f(k);v=n(I,"here"),I.forEach(a),F=n(x,"!"),x.forEach(a),this.h()},h(){g(o,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),g(k,"href","../training#finetune-with-trainer")},m(j,x){m(j,s,x),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F)},d(j){j&&a(s)}}}function vn(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$,y,Y,B,W,K,X,H,fe,U,oe,he,re,Z,ee;return v=new ae({props:{code:`from transformers import AutoModelForCausalLM, TrainingArguments, Trainer model = AutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),j=new Tt({props:{$$slots:{default:[yn]},$$scope:{ctx:Q}}}),Z=new ae({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=lm_dataset["train"], eval_dataset=lm_dataset["test"], data_collator=data_collator, ) 
trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){s=i("p"),w=l("Load DistilGPT2 with "),o=i("a"),_=l("AutoModelForCausalLM"),b=l(":"),k=c(),q(v.$$.fragment),F=c(),q(j.$$.fragment),x=c(),M=i("p"),I=l("At this point, only three steps remain:"),G=c(),A=i("ol"),N=i("li"),J=l("Define your training hyperparameters in "),O=i("a"),S=l("TrainingArguments"),R=l("."),$=c(),y=i("li"),Y=l("Pass the training arguments to "),B=i("a"),W=l("Trainer"),K=l(" along with the model, datasets, and data collator."),X=c(),H=i("li"),fe=l("Call "),U=i("a"),oe=l("train()"),he=l(" to fine-tune your model."),re=c(),q(Z.$$.fragment),this.h()},l(h){s=p(h,"P",{});var T=f(s);w=n(T,"Load DistilGPT2 with "),o=p(T,"A",{href:!0});var ie=f(o);_=n(ie,"AutoModelForCausalLM"),ie.forEach(a),b=n(T,":"),T.forEach(a),k=d(h),D(v.$$.fragment,h),F=d(h),D(j.$$.fragment,h),x=d(h),M=p(h,"P",{});var ne=f(M);I=n(ne,"At this point, only three steps remain:"),ne.forEach(a),G=d(h),A=p(h,"OL",{});var V=f(A);N=p(V,"LI",{});var me=f(N);J=n(me,"Define your training hyperparameters in "),O=p(me,"A",{href:!0});var pe=f(O);S=n(pe,"TrainingArguments"),pe.forEach(a),R=n(me,"."),me.forEach(a),$=d(V),y=p(V,"LI",{});var te=f(y);Y=n(te,"Pass the training arguments to "),B=p(te,"A",{href:!0});var se=f(B);W=n(se,"Trainer"),se.forEach(a),K=n(te," along with the model, datasets, and data collator."),te.forEach(a),X=d(V),H=p(V,"LI",{});var le=f(H);fe=n(le,"Call "),U=p(le,"A",{href:!0});var r=f(U);oe=n(r,"train()"),r.forEach(a),he=n(le," to fine-tune your model."),le.forEach(a),V.forEach(a),re=d(h),D(Z.$$.fragment,h),this.h()},h(){g(o,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForCausalLM"),g(O,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),g(B,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),g(U,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(h,T){m(h,s,T),t(s,w),t(s,o),t(o,_),t(s,b),m(h,k,T),z(v,h,T),m(h,F,T),z(j,h,T),m(h,x,T),m(h,M,T),t(M,I),m(h,G,T),m(h,A,T),t(A,N),t(N,J),t(N,O),t(O,S),t(N,R),t(A,$),t(A,y),t(y,Y),t(y,B),t(B,W),t(y,K),t(A,X),t(A,H),t(H,fe),t(H,U),t(U,oe),t(H,he),m(h,re,T),z(Z,h,T),ee=!0},p(h,T){const 
ie={};T&2&&(ie.$$scope={dirty:T,ctx:h}),j.$set(ie)},i(h){ee||(C(v.$$.fragment,h),C(j.$$.fragment,h),C(Z.$$.fragment,h),ee=!0)},o(h){L(v.$$.fragment,h),L(j.$$.fragment,h),L(Z.$$.fragment,h),ee=!1},d(h){h&&a(s),h&&a(k),P(v,h),h&&a(F),P(j,h),h&&a(x),h&&a(M),h&&a(G),h&&a(A),h&&a(re),P(Z,h)}}}function bn(Q){let s,w;return s=new At({props:{$$slots:{default:[vn]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function xn(Q){let s,w,o,_,b;return{c(){s=i("p"),w=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),o=i("a"),_=l("here"),b=l("!"),this.h()},l(k){s=p(k,"P",{});var v=f(s);w=n(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),o=p(v,"A",{href:!0});var F=f(o);_=n(F,"here"),F.forEach(a),b=n(v,"!"),v.forEach(a),this.h()},h(){g(o,"href","training#finetune-with-keras")},m(k,v){m(k,s,v),t(s,w),t(s,o),t(o,_),t(s,b)},d(k){k&&a(s)}}}function En(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$,y,Y,B,W,K,X,H,fe,U,oe,he,re,Z,ee,h,T,ie,ne,V,me,pe,te,se,le;return x=new ae({props:{code:`tf_train_set = model.prepare_tf_dataset( lm_dataset["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = model.prepare_tf_dataset( lm_dataset["test"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),I=new Tt({props:{$$slots:{default:[xn]},$$scope:{ctx:Q}}}),O=new ae({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),K=new ae({props:{code:`from transformers import TFAutoModelForCausalLM model = TFAutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),ee=new ae({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),se=new ae({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){s=i("p"),w=l("To fine-tune a model in TensorFlow, start by converting your datasets to the "),o=i("code"),_=l("tf.data.Dataset"),b=l(" format with "),k=i("a"),v=l("prepare_tf_dataset()"),F=l("."),j=c(),q(x.$$.fragment),M=c(),q(I.$$.fragment),G=c(),A=i("p"),N=l("Set up an optimizer function, learning rate, and some training hyperparameters:"),J=c(),q(O.$$.fragment),S=c(),R=i("p"),$=l("Load DistilGPT2 with "),y=i("a"),Y=l("TFAutoModelForCausalLM"),B=l(":"),W=c(),q(K.$$.fragment),X=c(),H=i("p"),fe=l("Configure the model for training with "),U=i("a"),oe=i("code"),he=l("compile"),re=l(":"),Z=c(),q(ee.$$.fragment),h=c(),T=i("p"),ie=l("Call "),ne=i("a"),V=i("code"),me=l("fit"),pe=l(" to fine-tune the model:"),te=c(),q(se.$$.fragment),this.h()},l(r){s=p(r,"P",{});var E=f(s);w=n(E,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),o=p(E,"CODE",{});var ce=f(o);_=n(ce,"tf.data.Dataset"),ce.forEach(a),b=n(E," format with "),k=p(E,"A",{href:!0});var ve=f(k);v=n(ve,"prepare_tf_dataset()"),ve.forEach(a),F=n(E,"."),E.forEach(a),j=d(r),D(x.$$.fragment,r),M=d(r),D(I.$$.fragment,r),G=d(r),A=p(r,"P",{});var we=f(A);N=n(we,"Set up an optimizer function, learning rate, and some training hyperparameters:"),we.forEach(a),J=d(r),D(O.$$.fragment,r),S=d(r),R=p(r,"P",{});var _e=f(R);$=n(_e,"Load DistilGPT2 with "),y=p(_e,"A",{href:!0});var be=f(y);Y=n(be,"TFAutoModelForCausalLM"),be.forEach(a),B=n(_e,":"),_e.forEach(a),W=d(r),D(K.$$.fragment,r),X=d(r),H=p(r,"P",{});var ue=f(H);fe=n(ue,"Configure the model for training with "),U=p(ue,"A",{href:!0,rel:!0});var xe=f(U);oe=p(xe,"CODE",{});var je=f(oe);he=n(je,"compile"),je.forEach(a),xe.forEach(a),re=n(ue,":"),ue.forEach(a),Z=d(r),D(ee.$$.fragment,r),h=d(r),T=p(r,"P",{});var $e=f(T);ie=n($e,"Call "),ne=p($e,"A",{href:!0,rel:!0});var Ee=f(ne);V=p(Ee,"CODE",{});var 
ke=f(V);me=n(ke,"fit"),ke.forEach(a),Ee.forEach(a),pe=n($e," to fine-tune the model:"),$e.forEach(a),te=d(r),D(se.$$.fragment,r),this.h()},h(){g(k,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),g(y,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForCausalLM"),g(U,"href","https://keras.io/api/models/model_training_apis/#compile-method"),g(U,"rel","nofollow"),g(ne,"href","https://keras.io/api/models/model_training_apis/#fit-method"),g(ne,"rel","nofollow")},m(r,E){m(r,s,E),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F),m(r,j,E),z(x,r,E),m(r,M,E),z(I,r,E),m(r,G,E),m(r,A,E),t(A,N),m(r,J,E),z(O,r,E),m(r,S,E),m(r,R,E),t(R,$),t(R,y),t(y,Y),t(R,B),m(r,W,E),z(K,r,E),m(r,X,E),m(r,H,E),t(H,fe),t(H,U),t(U,oe),t(oe,he),t(H,re),m(r,Z,E),z(ee,r,E),m(r,h,E),m(r,T,E),t(T,ie),t(T,ne),t(ne,V),t(V,me),t(T,pe),m(r,te,E),z(se,r,E),le=!0},p(r,E){const ce={};E&2&&(ce.$$scope={dirty:E,ctx:r}),I.$set(ce)},i(r){le||(C(x.$$.fragment,r),C(I.$$.fragment,r),C(O.$$.fragment,r),C(K.$$.fragment,r),C(ee.$$.fragment,r),C(se.$$.fragment,r),le=!0)},o(r){L(x.$$.fragment,r),L(I.$$.fragment,r),L(O.$$.fragment,r),L(K.$$.fragment,r),L(ee.$$.fragment,r),L(se.$$.fragment,r),le=!1},d(r){r&&a(s),r&&a(j),P(x,r),r&&a(M),P(I,r),r&&a(G),r&&a(A),r&&a(J),P(O,r),r&&a(S),r&&a(R),r&&a(W),P(K,r),r&&a(X),r&&a(H),r&&a(Z),P(ee,r),r&&a(h),r&&a(T),r&&a(te),P(se,r)}}}function Tn(Q){let s,w;return s=new At({props:{$$slots:{default:[En]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function An(Q){let s,w,o,_,b,k,v,F;return{c(){s=i("p"),w=l("If you aren\u2019t familiar with fine-tuning a model with the "),o=i("a"),_=l("Trainer"),b=l(", take a look at the basic tutorial "),k=i("a"),v=l("here"),F=l("!"),this.h()},l(j){s=p(j,"P",{});var x=f(s);w=n(x,"If you aren\u2019t familiar with fine-tuning a model with the "),o=p(x,"A",{href:!0});var M=f(o);_=n(M,"Trainer"),M.forEach(a),b=n(x,", take a look at the basic tutorial "),k=p(x,"A",{href:!0});var I=f(k);v=n(I,"here"),I.forEach(a),F=n(x,"!"),x.forEach(a),this.h()},h(){g(o,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),g(k,"href","../training#finetune-with-trainer")},m(j,x){m(j,s,x),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F)},d(j){j&&a(s)}}}function Mn(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$,y,Y,B,W,K,X,H,fe,U,oe,he,re,Z,ee;return v=new ae({props:{code:`from transformers import AutoModelForMaskedLM model = AutoModelForMaskedLM.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),j=new Tt({props:{$$slots:{default:[An]},$$scope:{ctx:Q}}}),Z=new ae({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=lm_dataset["train"], eval_dataset=lm_dataset["test"], data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span 
class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){s=i("p"),w=l("Load DistilRoBERTa with "),o=i("code"),_=l("AutoModelForMaskedlM"),b=l(":"),k=c(),q(v.$$.fragment),F=c(),q(j.$$.fragment),x=c(),M=i("p"),I=l("At this point, only three steps remain:"),G=c(),A=i("ol"),N=i("li"),J=l("Define your training hyperparameters in "),O=i("a"),S=l("TrainingArguments"),R=l("."),$=c(),y=i("li"),Y=l("Pass the training arguments to "),B=i("a"),W=l("Trainer"),K=l(" along with the model, datasets, and data collator."),X=c(),H=i("li"),fe=l("Call "),U=i("a"),oe=l("train()"),he=l(" to fine-tune your model."),re=c(),q(Z.$$.fragment),this.h()},l(h){s=p(h,"P",{});var T=f(s);w=n(T,"Load DistilRoBERTa with "),o=p(T,"CODE",{});var ie=f(o);_=n(ie,"AutoModelForMaskedlM"),ie.forEach(a),b=n(T,":"),T.forEach(a),k=d(h),D(v.$$.fragment,h),F=d(h),D(j.$$.fragment,h),x=d(h),M=p(h,"P",{});var ne=f(M);I=n(ne,"At this point, only three steps remain:"),ne.forEach(a),G=d(h),A=p(h,"OL",{});var V=f(A);N=p(V,"LI",{});var me=f(N);J=n(me,"Define your training hyperparameters in "),O=p(me,"A",{href:!0});var pe=f(O);S=n(pe,"TrainingArguments"),pe.forEach(a),R=n(me,"."),me.forEach(a),$=d(V),y=p(V,"LI",{});var te=f(y);Y=n(te,"Pass the training arguments to "),B=p(te,"A",{href:!0});var se=f(B);W=n(se,"Trainer"),se.forEach(a),K=n(te," along with the model, datasets, and data collator."),te.forEach(a),X=d(V),H=p(V,"LI",{});var le=f(H);fe=n(le,"Call "),U=p(le,"A",{href:!0});var r=f(U);oe=n(r,"train()"),r.forEach(a),he=n(le," to fine-tune your model."),le.forEach(a),V.forEach(a),re=d(h),D(Z.$$.fragment,h),this.h()},h(){g(O,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),g(B,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),g(U,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(h,T){m(h,s,T),t(s,w),t(s,o),t(o,_),t(s,b),m(h,k,T),z(v,h,T),m(h,F,T),z(j,h,T),m(h,x,T),m(h,M,T),t(M,I),m(h,G,T),m(h,A,T),t(A,N),t(N,J),t(N,O),t(O,S),t(N,R),t(A,$),t(A,y),t(y,Y),t(y,B),t(B,W),t(y,K),t(A,X),t(A,H),t(H,fe),t(H,U),t(U,oe),t(H,he),m(h,re,T),z(Z,h,T),ee=!0},p(h,T){const ie={};T&2&&(ie.$$scope={dirty:T,ctx:h}),j.$set(ie)},i(h){ee||(C(v.$$.fragment,h),C(j.$$.fragment,h),C(Z.$$.fragment,h),ee=!0)},o(h){L(v.$$.fragment,h),L(j.$$.fragment,h),L(Z.$$.fragment,h),ee=!1},d(h){h&&a(s),h&&a(k),P(v,h),h&&a(F),P(j,h),h&&a(x),h&&a(M),h&&a(G),h&&a(A),h&&a(re),P(Z,h)}}}function Fn(Q){let s,w;return s=new 
At({props:{$$slots:{default:[Mn]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function qn(Q){let s,w,o,_,b;return{c(){s=i("p"),w=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),o=i("a"),_=l("here"),b=l("!"),this.h()},l(k){s=p(k,"P",{});var v=f(s);w=n(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),o=p(v,"A",{href:!0});var F=f(o);_=n(F,"here"),F.forEach(a),b=n(v,"!"),v.forEach(a),this.h()},h(){g(o,"href","training#finetune-with-keras")},m(k,v){m(k,s,v),t(s,w),t(s,o),t(o,_),t(s,b)},d(k){k&&a(s)}}}function Dn(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$,y,Y,B,W,K,X,H,fe,U,oe,he,re,Z,ee,h,T,ie,ne,V,me,pe,te,se,le;return x=new ae({props:{code:`tf_train_set = model.prepare_tf_dataset( lm_dataset["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = model.prepare_tf_dataset( lm_dataset["test"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),I=new Tt({props:{$$slots:{default:[qn]},$$scope:{ctx:Q}}}),O=new ae({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),K=new ae({props:{code:`from transformers import TFAutoModelForMaskedLM model = TFAutoModelForCausalLM.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),ee=new ae({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),se=new ae({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){s=i("p"),w=l("To fine-tune a model in TensorFlow, start by converting your datasets to the "),o=i("code"),_=l("tf.data.Dataset"),b=l(" format with "),k=i("a"),v=l("prepare_tf_dataset()"),F=l("."),j=c(),q(x.$$.fragment),M=c(),q(I.$$.fragment),G=c(),A=i("p"),N=l("Set up an optimizer function, learning rate, and some training hyperparameters:"),J=c(),q(O.$$.fragment),S=c(),R=i("p"),$=l("Load DistilRoBERTa with "),y=i("a"),Y=l("TFAutoModelForMaskedLM"),B=l(":"),W=c(),q(K.$$.fragment),X=c(),H=i("p"),fe=l("Configure the model for training with "),U=i("a"),oe=i("code"),he=l("compile"),re=l(":"),Z=c(),q(ee.$$.fragment),h=c(),T=i("p"),ie=l("Call "),ne=i("a"),V=i("code"),me=l("fit"),pe=l(" to fine-tune the model:"),te=c(),q(se.$$.fragment),this.h()},l(r){s=p(r,"P",{});var E=f(s);w=n(E,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),o=p(E,"CODE",{});var ce=f(o);_=n(ce,"tf.data.Dataset"),ce.forEach(a),b=n(E," format with "),k=p(E,"A",{href:!0});var ve=f(k);v=n(ve,"prepare_tf_dataset()"),ve.forEach(a),F=n(E,"."),E.forEach(a),j=d(r),D(x.$$.fragment,r),M=d(r),D(I.$$.fragment,r),G=d(r),A=p(r,"P",{});var we=f(A);N=n(we,"Set up an optimizer function, learning rate, and some training hyperparameters:"),we.forEach(a),J=d(r),D(O.$$.fragment,r),S=d(r),R=p(r,"P",{});var _e=f(R);$=n(_e,"Load DistilRoBERTa with "),y=p(_e,"A",{href:!0});var be=f(y);Y=n(be,"TFAutoModelForMaskedLM"),be.forEach(a),B=n(_e,":"),_e.forEach(a),W=d(r),D(K.$$.fragment,r),X=d(r),H=p(r,"P",{});var ue=f(H);fe=n(ue,"Configure the model for training with "),U=p(ue,"A",{href:!0,rel:!0});var xe=f(U);oe=p(xe,"CODE",{});var je=f(oe);he=n(je,"compile"),je.forEach(a),xe.forEach(a),re=n(ue,":"),ue.forEach(a),Z=d(r),D(ee.$$.fragment,r),h=d(r),T=p(r,"P",{});var $e=f(T);ie=n($e,"Call "),ne=p($e,"A",{href:!0,rel:!0});var Ee=f(ne);V=p(Ee,"CODE",{});var 
ke=f(V);me=n(ke,"fit"),ke.forEach(a),Ee.forEach(a),pe=n($e," to fine-tune the model:"),$e.forEach(a),te=d(r),D(se.$$.fragment,r),this.h()},h(){g(k,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),g(y,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForMaskedLM"),g(U,"href","https://keras.io/api/models/model_training_apis/#compile-method"),g(U,"rel","nofollow"),g(ne,"href","https://keras.io/api/models/model_training_apis/#fit-method"),g(ne,"rel","nofollow")},m(r,E){m(r,s,E),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F),m(r,j,E),z(x,r,E),m(r,M,E),z(I,r,E),m(r,G,E),m(r,A,E),t(A,N),m(r,J,E),z(O,r,E),m(r,S,E),m(r,R,E),t(R,$),t(R,y),t(y,Y),t(R,B),m(r,W,E),z(K,r,E),m(r,X,E),m(r,H,E),t(H,fe),t(H,U),t(U,oe),t(oe,he),t(H,re),m(r,Z,E),z(ee,r,E),m(r,h,E),m(r,T,E),t(T,ie),t(T,ne),t(ne,V),t(V,me),t(T,pe),m(r,te,E),z(se,r,E),le=!0},p(r,E){const ce={};E&2&&(ce.$$scope={dirty:E,ctx:r}),I.$set(ce)},i(r){le||(C(x.$$.fragment,r),C(I.$$.fragment,r),C(O.$$.fragment,r),C(K.$$.fragment,r),C(ee.$$.fragment,r),C(se.$$.fragment,r),le=!0)},o(r){L(x.$$.fragment,r),L(I.$$.fragment,r),L(O.$$.fragment,r),L(K.$$.fragment,r),L(ee.$$.fragment,r),L(se.$$.fragment,r),le=!1},d(r){r&&a(s),r&&a(j),P(x,r),r&&a(M),P(I,r),r&&a(G),r&&a(A),r&&a(J),P(O,r),r&&a(S),r&&a(R),r&&a(W),P(K,r),r&&a(X),r&&a(H),r&&a(Z),P(ee,r),r&&a(h),r&&a(T),r&&a(te),P(se,r)}}}function zn(Q){let s,w;return s=new At({props:{$$slots:{default:[Dn]},$$scope:{ctx:Q}}}),{c(){q(s.$$.fragment)},l(o){D(s.$$.fragment,o)},m(o,_){z(s,o,_),w=!0},p(o,_){const b={};_&2&&(b.$$scope={dirty:_,ctx:o}),s.$set(b)},i(o){w||(C(s.$$.fragment,o),w=!0)},o(o){L(s.$$.fragment,o),w=!1},d(o){P(s,o)}}}function Cn(Q){let s,w,o,_,b,k,v,F;return{c(){s=i("p"),w=l(`For a more in-depth example of how to fine-tune a model for causal language modeling, take a look at the corresponding `),o=i("a"),_=l("PyTorch notebook"),b=l(` or `),k=i("a"),v=l("TensorFlow notebook"),F=l("."),this.h()},l(j){s=p(j,"P",{});var x=f(s);w=n(x,`For a more in-depth example of how to fine-tune a model for causal language modeling, take a look at the corresponding `),o=p(x,"A",{href:!0,rel:!0});var M=f(o);_=n(M,"PyTorch notebook"),M.forEach(a),b=n(x,` or `),k=p(x,"A",{href:!0,rel:!0});var I=f(k);v=n(I,"TensorFlow notebook"),I.forEach(a),F=n(x,"."),x.forEach(a),this.h()},h(){g(o,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb"),g(o,"rel","nofollow"),g(k,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb"),g(k,"rel","nofollow")},m(j,x){m(j,s,x),t(s,w),t(s,o),t(o,_),t(s,b),t(s,k),t(k,v),t(s,F)},d(j){j&&a(s)}}}function Ln(Q){let s,w,o,_,b,k,v,F,j,x,M,I,G,A,N,J,O,S,R,$,y,Y,B,W,K,X,H,fe,U,oe,he,re,Z,ee,h,T,ie,ne,V,me,pe,te,se,le,r,E,ce,ve,we,_e,be,ue,xe,je,$e,Ee,ke,ra,wt,Za,oa,Xe,ia,ye,es,Mt,ts,as,Ft,ss,ls,qt,ns,rs,pa,De,Ie,Dt,Ze,os,zt,is,fa,et,ma,Oe,ps,Ct,fs,ms,ha,tt,ua,at,ca,jt,hs,da,st,ga,Te,us,Lt,cs,ds,lt,Pt,gs,_s,_a,nt,$a,Ae,$s,It,ks,ws,Ot,js,ys,ka,yt,vs,wa,rt,ja,de,bs,ot,xs,Es,Rt,Ts,As,St,Ms,Fs,Nt,qs,Ds,ya,it,va,vt,zs,ba,Re,Gt,Cs,Ls,pt,Ps,Bt,Is,Os,xa,ft,Ea,Se,Rs,Wt,Ss,Ns,Ta,mt,Aa,ge,Gs,bt,Bs,Ws,Ht,Hs,Ys,Yt,Us,Js,Ut,Ks,Qs,Ma,Ne,Fa,ze,Ge,Jt,ht,Vs,Kt,Xs,qa,Be,Zs,ut,el,tl,Da,Ce,We,Qt,ct,al,Vt,sl,za,He,Ca,Le,Ye,Xt,dt,ll,Zt,nl,La,Ue,rl,gt,ol,il,Pa,Pe,Je,ea,_t,pl,ta,fl,Ia,Ke,Oa,Qe,Ra;return k=new kt({}),A=new Xa({props:{id:"Vpjb1lu0MDk"}}),R=new Xa({props:{id:"mqElG5QJWUg"}}),V=new 
Tt({props:{$$slots:{default:[_n]},$$scope:{ctx:Q}}}),le=new kt({}),ue=new ae({props:{code:`from datasets import load_dataset eli5 = load_dataset("eli5", split="train_asks[:5000]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = load_dataset(<span class="hljs-string">&quot;eli5&quot;</span>, split=<span class="hljs-string">&quot;train_asks[:5000]&quot;</span>)`}}),ke=new ae({props:{code:"eli5 = eli5.train_test_split(test_size=0.2)",highlighted:'eli5 = eli5.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),Xe=new ae({props:{code:'eli5["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\\n\\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>]}, <span class="hljs-string">&#x27;answers_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}, <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\\n\\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>]}, <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}}`}}),Ze=new kt({}),et=new Xa({props:{id:"ma1TrR7gE7I"}}),tt=new ae({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),at=new Xa({props:{id:"8PmhEIXhBvI"}}),st=new ae({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),nt=new ae({props:{code:`eli5 = eli5.flatten() eli5["train"][0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = eli5.flatten() <span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers.a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;answers.score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;answers.text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\\n\\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. 
If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>], <span class="hljs-string">&#x27;answers_urls.url&#x27;</span>: [], <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\\n\\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls.url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>], <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls.url&#x27;</span>: []}`}}),rt=new ae({props:{code:`def preprocess_function(examples): return tokenizer([" ".join(x) for x in examples["answers.text"]], truncation=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer([<span class="hljs-string">&quot; &quot;</span>.join(x) <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;answers.text&quot;</span>]], truncation=<span class="hljs-literal">True</span>)`}}),it=new ae({props:{code:`tokenized_eli5 = eli5.map( preprocess_function, batched=True, num_proc=4, remove_columns=eli5["train"].column_names, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_eli5 = eli5.<span class="hljs-built_in">map</span>( <span class="hljs-meta">... </span> preprocess_function, <span class="hljs-meta">... </span> batched=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> num_proc=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> remove_columns=eli5[<span class="hljs-string">&quot;train&quot;</span>].column_names, <span class="hljs-meta">... 
</span>)`}}),ft=new ae({props:{code:`block_size = 128 def group_texts(examples): concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) total_length = (total_length // block_size) * block_size result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>block_size = <span class="hljs-number">128</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">group_texts</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> concatenated_examples = {k: <span class="hljs-built_in">sum</span>(examples[k], []) <span class="hljs-keyword">for</span> k <span class="hljs-keyword">in</span> examples.keys()} <span class="hljs-meta">... </span> total_length = <span class="hljs-built_in">len</span>(concatenated_examples[<span class="hljs-built_in">list</span>(examples.keys())[<span class="hljs-number">0</span>]]) <span class="hljs-meta">... </span> total_length = (total_length // block_size) * block_size <span class="hljs-meta">... </span> result = { <span class="hljs-meta">... </span> k: [t[i : i + block_size] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, total_length, block_size)] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> k, t <span class="hljs-keyword">in</span> concatenated_examples.items() <span class="hljs-meta">... </span> } <span class="hljs-meta">... </span> result[<span class="hljs-string">&quot;labels&quot;</span>] = result[<span class="hljs-string">&quot;input_ids&quot;</span>].copy() <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> result`}}),mt=new ae({props:{code:"lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>lm_dataset = tokenized_eli5.<span class="hljs-built_in">map</span>(group_texts, batched=<span class="hljs-literal">True</span>, num_proc=<span class="hljs-number">4</span>)'}}),Ne=new ml({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[jn],pytorch:[kn]},$$scope:{ctx:Q}}}),ht=new kt({}),ct=new kt({}),He=new ml({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Tn],pytorch:[bn]},$$scope:{ctx:Q}}}),dt=new kt({}),_t=new kt({}),Ke=new ml({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[zn],pytorch:[Fn]},$$scope:{ctx:Q}}}),Qe=new Tt({props:{$$slots:{default:[Cn]},$$scope:{ctx:Q}}}),{c(){s=i("meta"),w=c(),o=i("h1"),_=i("a"),b=i("span"),q(k.$$.fragment),v=c(),F=i("span"),j=l("Language modeling"),x=c(),M=i("p"),I=l("Language modeling predicts words in a sentence. 
There are two forms of language modeling."),G=c(),q(A.$$.fragment),N=c(),J=i("p"),O=l("Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left."),S=c(),q(R.$$.fragment),$=c(),y=i("p"),Y=l("Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally."),B=c(),W=i("p"),K=l("This guide will show you how to fine-tune "),X=i("a"),H=l("DistilGPT2"),fe=l(" for causal language modeling and "),U=i("a"),oe=l("DistilRoBERTa"),he=l(" for masked language modeling on the "),re=i("a"),Z=l("r/askscience"),ee=l(" subset of the "),h=i("a"),T=l("ELI5"),ie=l(" dataset."),ne=c(),q(V.$$.fragment),me=c(),pe=i("h2"),te=i("a"),se=i("span"),q(le.$$.fragment),r=c(),E=i("span"),ce=l("Load ELI5 dataset"),ve=c(),we=i("p"),_e=l("Load only the first 5000 rows of the ELI5 dataset from the \u{1F917} Datasets library since it is pretty large:"),be=c(),q(ue.$$.fragment),xe=c(),je=i("p"),$e=l("Split this dataset into a train and test set:"),Ee=c(),q(ke.$$.fragment),ra=c(),wt=i("p"),Za=l("Then take a look at an example:"),oa=c(),q(Xe.$$.fragment),ia=c(),ye=i("p"),es=l("Notice "),Mt=i("code"),ts=l("text"),as=l(" is a subfield nested inside the "),Ft=i("code"),ss=l("answers"),ls=l(" dictionary. When you preprocess the dataset, you will need to extract the "),qt=i("code"),ns=l("text"),rs=l(" subfield into a separate column."),pa=c(),De=i("h2"),Ie=i("a"),Dt=i("span"),q(Ze.$$.fragment),os=c(),zt=i("span"),is=l("Preprocess"),fa=c(),q(et.$$.fragment),ma=c(),Oe=i("p"),ps=l("For causal language modeling, load the DistilGPT2 tokenizer to process the "),Ct=i("code"),fs=l("text"),ms=l(" subfield:"),ha=c(),q(tt.$$.fragment),ua=c(),q(at.$$.fragment),ca=c(),jt=i("p"),hs=l("For masked language modeling, load the DistilRoBERTa tokenizer instead:"),da=c(),q(st.$$.fragment),ga=c(),Te=i("p"),us=l("Extract the "),Lt=i("code"),cs=l("text"),ds=l(" subfield from its nested structure with the "),lt=i("a"),Pt=i("code"),gs=l("flatten"),_s=l(" method:"),_a=c(),q(nt.$$.fragment),$a=c(),Ae=i("p"),$s=l("Each subfield is now a separate column as indicated by the "),It=i("code"),ks=l("answers"),ws=l(" prefix. Notice that "),Ot=i("code"),js=l("answers.text"),ys=l(" is a list. Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them."),ka=c(),yt=i("p"),vs=l("Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2\u2019s maximum input length:"),wa=c(),q(rt.$$.fragment),ja=c(),de=i("p"),bs=l("Use \u{1F917} Datasets "),ot=i("a"),xs=l("map"),Es=l(" function to apply the preprocessing function over the entire dataset. You can speed up the "),Rt=i("code"),Ts=l("map"),As=l(" function by setting "),St=i("code"),Ms=l("batched=True"),Fs=l(" to process multiple elements of the dataset at once and increasing the number of processes with "),Nt=i("code"),qs=l("num_proc"),Ds=l(". Remove the columns you don\u2019t need:"),ya=c(),q(it.$$.fragment),va=c(),vt=i("p"),zs=l("Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. 
This preprocessing function should:"),ba=c(),Re=i("ul"),Gt=i("li"),Cs=l("Concatenate all the text."),Ls=c(),pt=i("li"),Ps=l("Split the concatenated text into smaller chunks defined by "),Bt=i("code"),Is=l("block_size"),Os=l("."),xa=c(),q(ft.$$.fragment),Ea=c(),Se=i("p"),Rs=l("Apply the "),Wt=i("code"),Ss=l("group_texts"),Ns=l(" function over the entire dataset:"),Ta=c(),q(mt.$$.fragment),Aa=c(),ge=i("p"),Gs=l("For causal language modeling, use "),bt=i("a"),Bs=l("DataCollatorForLanguageModeling"),Ws=l(" to create a batch of examples. It will also "),Ht=i("em"),Hs=l("dynamically pad"),Ys=l(" your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Yt=i("code"),Us=l("tokenizer"),Js=l(" function by setting "),Ut=i("code"),Ks=l("padding=True"),Qs=l(", dynamic padding is more efficient."),Ma=c(),q(Ne.$$.fragment),Fa=c(),ze=i("h2"),Ge=i("a"),Jt=i("span"),q(ht.$$.fragment),Vs=c(),Kt=i("span"),Xs=l("Causal language modeling"),qa=c(),Be=i("p"),Zs=l("Causal language modeling is frequently used for text generation. This section shows you how to fine-tune "),ut=i("a"),el=l("DistilGPT2"),tl=l(" to generate new text."),Da=c(),Ce=i("h3"),We=i("a"),Qt=i("span"),q(ct.$$.fragment),al=c(),Vt=i("span"),sl=l("Train"),za=c(),q(He.$$.fragment),Ca=c(),Le=i("h2"),Ye=i("a"),Xt=i("span"),q(dt.$$.fragment),ll=c(),Zt=i("span"),nl=l("Masked language modeling"),La=c(),Ue=i("p"),rl=l("Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. This section shows you how to fine-tune "),gt=i("a"),ol=l("DistilRoBERTa"),il=l(" to predict a masked word."),Pa=c(),Pe=i("h3"),Je=i("a"),ea=i("span"),q(_t.$$.fragment),pl=c(),ta=i("span"),fl=l("Train"),Ia=c(),q(Ke.$$.fragment),Oa=c(),q(Qe.$$.fragment),this.h()},l(e){const u=dn('[data-svelte="svelte-1phssyn"]',document.head);s=p(u,"META",{name:!0,content:!0}),u.forEach(a),w=d(e),o=p(e,"H1",{class:!0});var $t=f(o);_=p($t,"A",{id:!0,class:!0,href:!0});var aa=f(_);b=p(aa,"SPAN",{});var sa=f(b);D(k.$$.fragment,sa),sa.forEach(a),aa.forEach(a),v=d($t),F=p($t,"SPAN",{});var la=f(F);j=n(la,"Language modeling"),la.forEach(a),$t.forEach(a),x=d(e),M=p(e,"P",{});var na=f(M);I=n(na,"Language modeling predicts words in a sentence. 
There are two forms of language modeling."),na.forEach(a),G=d(e),D(A.$$.fragment,e),N=d(e),J=p(e,"P",{});var hl=f(J);O=n(hl,"Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left."),hl.forEach(a),S=d(e),D(R.$$.fragment,e),$=d(e),y=p(e,"P",{});var ul=f(y);Y=n(ul,"Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally."),ul.forEach(a),B=d(e),W=p(e,"P",{});var Me=f(W);K=n(Me,"This guide will show you how to fine-tune "),X=p(Me,"A",{href:!0,rel:!0});var cl=f(X);H=n(cl,"DistilGPT2"),cl.forEach(a),fe=n(Me," for causal language modeling and "),U=p(Me,"A",{href:!0,rel:!0});var dl=f(U);oe=n(dl,"DistilRoBERTa"),dl.forEach(a),he=n(Me," for masked language modeling on the "),re=p(Me,"A",{href:!0,rel:!0});var gl=f(re);Z=n(gl,"r/askscience"),gl.forEach(a),ee=n(Me," subset of the "),h=p(Me,"A",{href:!0,rel:!0});var _l=f(h);T=n(_l,"ELI5"),_l.forEach(a),ie=n(Me," dataset."),Me.forEach(a),ne=d(e),D(V.$$.fragment,e),me=d(e),pe=p(e,"H2",{class:!0});var Sa=f(pe);te=p(Sa,"A",{id:!0,class:!0,href:!0});var $l=f(te);se=p($l,"SPAN",{});var kl=f(se);D(le.$$.fragment,kl),kl.forEach(a),$l.forEach(a),r=d(Sa),E=p(Sa,"SPAN",{});var wl=f(E);ce=n(wl,"Load ELI5 dataset"),wl.forEach(a),Sa.forEach(a),ve=d(e),we=p(e,"P",{});var jl=f(we);_e=n(jl,"Load only the first 5000 rows of the ELI5 dataset from the \u{1F917} Datasets library since it is pretty large:"),jl.forEach(a),be=d(e),D(ue.$$.fragment,e),xe=d(e),je=p(e,"P",{});var yl=f(je);$e=n(yl,"Split this dataset into a train and test set:"),yl.forEach(a),Ee=d(e),D(ke.$$.fragment,e),ra=d(e),wt=p(e,"P",{});var vl=f(wt);Za=n(vl,"Then take a look at an example:"),vl.forEach(a),oa=d(e),D(Xe.$$.fragment,e),ia=d(e),ye=p(e,"P",{});var Ve=f(ye);es=n(Ve,"Notice "),Mt=p(Ve,"CODE",{});var bl=f(Mt);ts=n(bl,"text"),bl.forEach(a),as=n(Ve," is a subfield nested inside the "),Ft=p(Ve,"CODE",{});var xl=f(Ft);ss=n(xl,"answers"),xl.forEach(a),ls=n(Ve," dictionary. When you preprocess the dataset, you will need to extract the "),qt=p(Ve,"CODE",{});var El=f(qt);ns=n(El,"text"),El.forEach(a),rs=n(Ve," subfield into a separate column."),Ve.forEach(a),pa=d(e),De=p(e,"H2",{class:!0});var Na=f(De);Ie=p(Na,"A",{id:!0,class:!0,href:!0});var Tl=f(Ie);Dt=p(Tl,"SPAN",{});var Al=f(Dt);D(Ze.$$.fragment,Al),Al.forEach(a),Tl.forEach(a),os=d(Na),zt=p(Na,"SPAN",{});var Ml=f(zt);is=n(Ml,"Preprocess"),Ml.forEach(a),Na.forEach(a),fa=d(e),D(et.$$.fragment,e),ma=d(e),Oe=p(e,"P",{});var Ga=f(Oe);ps=n(Ga,"For causal language modeling, load the DistilGPT2 tokenizer to process the "),Ct=p(Ga,"CODE",{});var Fl=f(Ct);fs=n(Fl,"text"),Fl.forEach(a),ms=n(Ga," subfield:"),Ga.forEach(a),ha=d(e),D(tt.$$.fragment,e),ua=d(e),D(at.$$.fragment,e),ca=d(e),jt=p(e,"P",{});var ql=f(jt);hs=n(ql,"For masked language modeling, load the DistilRoBERTa tokenizer instead:"),ql.forEach(a),da=d(e),D(st.$$.fragment,e),ga=d(e),Te=p(e,"P",{});var xt=f(Te);us=n(xt,"Extract the "),Lt=p(xt,"CODE",{});var Dl=f(Lt);cs=n(Dl,"text"),Dl.forEach(a),ds=n(xt," subfield from its nested structure with the "),lt=p(xt,"A",{href:!0,rel:!0});var zl=f(lt);Pt=p(zl,"CODE",{});var Cl=f(Pt);gs=n(Cl,"flatten"),Cl.forEach(a),zl.forEach(a),_s=n(xt," method:"),xt.forEach(a),_a=d(e),D(nt.$$.fragment,e),$a=d(e),Ae=p(e,"P",{});var Et=f(Ae);$s=n(Et,"Each subfield is now a separate column as indicated by the "),It=p(Et,"CODE",{});var Ll=f(It);ks=n(Ll,"answers"),Ll.forEach(a),ws=n(Et," prefix. 
Notice that "),Ot=p(Et,"CODE",{});var Pl=f(Ot);js=n(Pl,"answers.text"),Pl.forEach(a),ys=n(Et," is a list. Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them."),Et.forEach(a),ka=d(e),yt=p(e,"P",{});var Il=f(yt);vs=n(Il,"Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2\u2019s maximum input length:"),Il.forEach(a),wa=d(e),D(rt.$$.fragment,e),ja=d(e),de=p(e,"P",{});var Fe=f(de);bs=n(Fe,"Use \u{1F917} Datasets "),ot=p(Fe,"A",{href:!0,rel:!0});var Ol=f(ot);xs=n(Ol,"map"),Ol.forEach(a),Es=n(Fe," function to apply the preprocessing function over the entire dataset. You can speed up the "),Rt=p(Fe,"CODE",{});var Rl=f(Rt);Ts=n(Rl,"map"),Rl.forEach(a),As=n(Fe," function by setting "),St=p(Fe,"CODE",{});var Sl=f(St);Ms=n(Sl,"batched=True"),Sl.forEach(a),Fs=n(Fe," to process multiple elements of the dataset at once and increasing the number of processes with "),Nt=p(Fe,"CODE",{});var Nl=f(Nt);qs=n(Nl,"num_proc"),Nl.forEach(a),Ds=n(Fe,". Remove the columns you don\u2019t need:"),Fe.forEach(a),ya=d(e),D(it.$$.fragment,e),va=d(e),vt=p(e,"P",{});var Gl=f(vt);zs=n(Gl,"Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. This preprocessing function should:"),Gl.forEach(a),ba=d(e),Re=p(e,"UL",{});var Ba=f(Re);Gt=p(Ba,"LI",{});var Bl=f(Gt);Cs=n(Bl,"Concatenate all the text."),Bl.forEach(a),Ls=d(Ba),pt=p(Ba,"LI",{});var Wa=f(pt);Ps=n(Wa,"Split the concatenated text into smaller chunks defined by "),Bt=p(Wa,"CODE",{});var Wl=f(Bt);Is=n(Wl,"block_size"),Wl.forEach(a),Os=n(Wa,"."),Wa.forEach(a),Ba.forEach(a),xa=d(e),D(ft.$$.fragment,e),Ea=d(e),Se=p(e,"P",{});var Ha=f(Se);Rs=n(Ha,"Apply the "),Wt=p(Ha,"CODE",{});var Hl=f(Wt);Ss=n(Hl,"group_texts"),Hl.forEach(a),Ns=n(Ha," function over the entire dataset:"),Ha.forEach(a),Ta=d(e),D(mt.$$.fragment,e),Aa=d(e),ge=p(e,"P",{});var qe=f(ge);Gs=n(qe,"For causal language modeling, use "),bt=p(qe,"A",{href:!0});var Yl=f(bt);Bs=n(Yl,"DataCollatorForLanguageModeling"),Yl.forEach(a),Ws=n(qe," to create a batch of examples. It will also "),Ht=p(qe,"EM",{});var Ul=f(Ht);Hs=n(Ul,"dynamically pad"),Ul.forEach(a),Ys=n(qe," your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Yt=p(qe,"CODE",{});var Jl=f(Yt);Us=n(Jl,"tokenizer"),Jl.forEach(a),Js=n(qe," function by setting "),Ut=p(qe,"CODE",{});var Kl=f(Ut);Ks=n(Kl,"padding=True"),Kl.forEach(a),Qs=n(qe,", dynamic padding is more efficient."),qe.forEach(a),Ma=d(e),D(Ne.$$.fragment,e),Fa=d(e),ze=p(e,"H2",{class:!0});var Ya=f(ze);Ge=p(Ya,"A",{id:!0,class:!0,href:!0});var Ql=f(Ge);Jt=p(Ql,"SPAN",{});var Vl=f(Jt);D(ht.$$.fragment,Vl),Vl.forEach(a),Ql.forEach(a),Vs=d(Ya),Kt=p(Ya,"SPAN",{});var Xl=f(Kt);Xs=n(Xl,"Causal language modeling"),Xl.forEach(a),Ya.forEach(a),qa=d(e),Be=p(e,"P",{});var Ua=f(Be);Zs=n(Ua,"Causal language modeling is frequently used for text generation. 
This section shows you how to fine-tune "),ut=p(Ua,"A",{href:!0,rel:!0});var Zl=f(ut);el=n(Zl,"DistilGPT2"),Zl.forEach(a),tl=n(Ua," to generate new text."),Ua.forEach(a),Da=d(e),Ce=p(e,"H3",{class:!0});var Ja=f(Ce);We=p(Ja,"A",{id:!0,class:!0,href:!0});var en=f(We);Qt=p(en,"SPAN",{});var tn=f(Qt);D(ct.$$.fragment,tn),tn.forEach(a),en.forEach(a),al=d(Ja),Vt=p(Ja,"SPAN",{});var an=f(Vt);sl=n(an,"Train"),an.forEach(a),Ja.forEach(a),za=d(e),D(He.$$.fragment,e),Ca=d(e),Le=p(e,"H2",{class:!0});var Ka=f(Le);Ye=p(Ka,"A",{id:!0,class:!0,href:!0});var sn=f(Ye);Xt=p(sn,"SPAN",{});var ln=f(Xt);D(dt.$$.fragment,ln),ln.forEach(a),sn.forEach(a),ll=d(Ka),Zt=p(Ka,"SPAN",{});var nn=f(Zt);nl=n(nn,"Masked language modeling"),nn.forEach(a),Ka.forEach(a),La=d(e),Ue=p(e,"P",{});var Qa=f(Ue);rl=n(Qa,"Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. This section shows you how to fine-tune "),gt=p(Qa,"A",{href:!0,rel:!0});var rn=f(gt);ol=n(rn,"DistilRoBERTa"),rn.forEach(a),il=n(Qa," to predict a masked word."),Qa.forEach(a),Pa=d(e),Pe=p(e,"H3",{class:!0});var Va=f(Pe);Je=p(Va,"A",{id:!0,class:!0,href:!0});var on=f(Je);ea=p(on,"SPAN",{});var pn=f(ea);D(_t.$$.fragment,pn),pn.forEach(a),on.forEach(a),pl=d(Va),ta=p(Va,"SPAN",{});var fn=f(ta);fl=n(fn,"Train"),fn.forEach(a),Va.forEach(a),Ia=d(e),D(Ke.$$.fragment,e),Oa=d(e),D(Qe.$$.fragment,e),this.h()},h(){g(s,"name","hf:doc:metadata"),g(s,"content",JSON.stringify(Pn)),g(_,"id","language-modeling"),g(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(_,"href","#language-modeling"),g(o,"class","relative group"),g(X,"href","https://huggingface.co/distilgpt2"),g(X,"rel","nofollow"),g(U,"href","https://huggingface.co/distilroberta-base"),g(U,"rel","nofollow"),g(re,"href","https://www.reddit.com/r/askscience/"),g(re,"rel","nofollow"),g(h,"href","https://huggingface.co/datasets/eli5"),g(h,"rel","nofollow"),g(te,"id","load-eli5-dataset"),g(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(te,"href","#load-eli5-dataset"),g(pe,"class","relative group"),g(Ie,"id","preprocess"),g(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Ie,"href","#preprocess"),g(De,"class","relative group"),g(lt,"href","https://huggingface.co/docs/datasets/process.html#flatten"),g(lt,"rel","nofollow"),g(ot,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),g(ot,"rel","nofollow"),g(bt,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling"),g(Ge,"id","causal-language-modeling"),g(Ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Ge,"href","#causal-language-modeling"),g(ze,"class","relative group"),g(ut,"href","https://huggingface.co/distilgpt2"),g(ut,"rel","nofollow"),g(We,"id","train"),g(We,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(We,"href","#train"),g(Ce,"class","relative group"),g(Ye,"id","masked-language-modeling"),g(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Ye,"href","#masked-language-modeling"),g(Le,"class","relative group"),g(gt,"href","https://huggingface.co/distilroberta-base"),g(gt,"rel","nofollow"),g(Je,"id","train"),g(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),g(Je,"href","#train"),g(Pe,"class","relative group")},m(e,u){t(document.head,s),m(e,w,u),m(e,o,u),t(o,_),t(_,b),z(k,b,null),t(o,v),t(o,F),t(F,j),m(e,x,u),m(e,M,u),t(M,I),m(e,G,u),z(A,e,u),m(e,N,u),m(e,J,u),t(J,O),m(e,S,u),z(R,e,u),m(e,$,u),m(e,y,u),t(y,Y),m(e,B,u),m(e,W,u),t(W,K),t(W,X),t(X,H),t(W,fe),t(W,U),t(U,oe),t(W,he),t(W,re),t(re,Z),t(W,ee),t(W,h),t(h,T),t(W,ie),m(e,ne,u),z(V,e,u),m(e,me,u),m(e,pe,u),t(pe,te),t(te,se),z(le,se,null),t(pe,r),t(pe,E),t(E,ce),m(e,ve,u),m(e,we,u),t(we,_e),m(e,be,u),z(ue,e,u),m(e,xe,u),m(e,je,u),t(je,$e),m(e,Ee,u),z(ke,e,u),m(e,ra,u),m(e,wt,u),t(wt,Za),m(e,oa,u),z(Xe,e,u),m(e,ia,u),m(e,ye,u),t(ye,es),t(ye,Mt),t(Mt,ts),t(ye,as),t(ye,Ft),t(Ft,ss),t(ye,ls),t(ye,qt),t(qt,ns),t(ye,rs),m(e,pa,u),m(e,De,u),t(De,Ie),t(Ie,Dt),z(Ze,Dt,null),t(De,os),t(De,zt),t(zt,is),m(e,fa,u),z(et,e,u),m(e,ma,u),m(e,Oe,u),t(Oe,ps),t(Oe,Ct),t(Ct,fs),t(Oe,ms),m(e,ha,u),z(tt,e,u),m(e,ua,u),z(at,e,u),m(e,ca,u),m(e,jt,u),t(jt,hs),m(e,da,u),z(st,e,u),m(e,ga,u),m(e,Te,u),t(Te,us),t(Te,Lt),t(Lt,cs),t(Te,ds),t(Te,lt),t(lt,Pt),t(Pt,gs),t(Te,_s),m(e,_a,u),z(nt,e,u),m(e,$a,u),m(e,Ae,u),t(Ae,$s),t(Ae,It),t(It,ks),t(Ae,ws),t(Ae,Ot),t(Ot,js),t(Ae,ys),m(e,ka,u),m(e,yt,u),t(yt,vs),m(e,wa,u),z(rt,e,u),m(e,ja,u),m(e,de,u),t(de,bs),t(de,ot),t(ot,xs),t(de,Es),t(de,Rt),t(Rt,Ts),t(de,As),t(de,St),t(St,Ms),t(de,Fs),t(de,Nt),t(Nt,qs),t(de,Ds),m(e,ya,u),z(it,e,u),m(e,va,u),m(e,vt,u),t(vt,zs),m(e,ba,u),m(e,Re,u),t(Re,Gt),t(Gt,Cs),t(Re,Ls),t(Re,pt),t(pt,Ps),t(pt,Bt),t(Bt,Is),t(pt,Os),m(e,xa,u),z(ft,e,u),m(e,Ea,u),m(e,Se,u),t(Se,Rs),t(Se,Wt),t(Wt,Ss),t(Se,Ns),m(e,Ta,u),z(mt,e,u),m(e,Aa,u),m(e,ge,u),t(ge,Gs),t(ge,bt),t(bt,Bs),t(ge,Ws),t(ge,Ht),t(Ht,Hs),t(ge,Ys),t(ge,Yt),t(Yt,Us),t(ge,Js),t(ge,Ut),t(Ut,Ks),t(ge,Qs),m(e,Ma,u),z(Ne,e,u),m(e,Fa,u),m(e,ze,u),t(ze,Ge),t(Ge,Jt),z(ht,Jt,null),t(ze,Vs),t(ze,Kt),t(Kt,Xs),m(e,qa,u),m(e,Be,u),t(Be,Zs),t(Be,ut),t(ut,el),t(Be,tl),m(e,Da,u),m(e,Ce,u),t(Ce,We),t(We,Qt),z(ct,Qt,null),t(Ce,al),t(Ce,Vt),t(Vt,sl),m(e,za,u),z(He,e,u),m(e,Ca,u),m(e,Le,u),t(Le,Ye),t(Ye,Xt),z(dt,Xt,null),t(Le,ll),t(Le,Zt),t(Zt,nl),m(e,La,u),m(e,Ue,u),t(Ue,rl),t(Ue,gt),t(gt,ol),t(Ue,il),m(e,Pa,u),m(e,Pe,u),t(Pe,Je),t(Je,ea),z(_t,ea,null),t(Pe,pl),t(Pe,ta),t(ta,fl),m(e,Ia,u),z(Ke,e,u),m(e,Oa,u),z(Qe,e,u),Ra=!0},p(e,[u]){const $t={};u&2&&($t.$$scope={dirty:u,ctx:e}),V.$set($t);const aa={};u&2&&(aa.$$scope={dirty:u,ctx:e}),Ne.$set(aa);const sa={};u&2&&(sa.$$scope={dirty:u,ctx:e}),He.$set(sa);const la={};u&2&&(la.$$scope={dirty:u,ctx:e}),Ke.$set(la);const 
na={};u&2&&(na.$$scope={dirty:u,ctx:e}),Qe.$set(na)},i(e){Ra||(C(k.$$.fragment,e),C(A.$$.fragment,e),C(R.$$.fragment,e),C(V.$$.fragment,e),C(le.$$.fragment,e),C(ue.$$.fragment,e),C(ke.$$.fragment,e),C(Xe.$$.fragment,e),C(Ze.$$.fragment,e),C(et.$$.fragment,e),C(tt.$$.fragment,e),C(at.$$.fragment,e),C(st.$$.fragment,e),C(nt.$$.fragment,e),C(rt.$$.fragment,e),C(it.$$.fragment,e),C(ft.$$.fragment,e),C(mt.$$.fragment,e),C(Ne.$$.fragment,e),C(ht.$$.fragment,e),C(ct.$$.fragment,e),C(He.$$.fragment,e),C(dt.$$.fragment,e),C(_t.$$.fragment,e),C(Ke.$$.fragment,e),C(Qe.$$.fragment,e),Ra=!0)},o(e){L(k.$$.fragment,e),L(A.$$.fragment,e),L(R.$$.fragment,e),L(V.$$.fragment,e),L(le.$$.fragment,e),L(ue.$$.fragment,e),L(ke.$$.fragment,e),L(Xe.$$.fragment,e),L(Ze.$$.fragment,e),L(et.$$.fragment,e),L(tt.$$.fragment,e),L(at.$$.fragment,e),L(st.$$.fragment,e),L(nt.$$.fragment,e),L(rt.$$.fragment,e),L(it.$$.fragment,e),L(ft.$$.fragment,e),L(mt.$$.fragment,e),L(Ne.$$.fragment,e),L(ht.$$.fragment,e),L(ct.$$.fragment,e),L(He.$$.fragment,e),L(dt.$$.fragment,e),L(_t.$$.fragment,e),L(Ke.$$.fragment,e),L(Qe.$$.fragment,e),Ra=!1},d(e){a(s),e&&a(w),e&&a(o),P(k),e&&a(x),e&&a(M),e&&a(G),P(A,e),e&&a(N),e&&a(J),e&&a(S),P(R,e),e&&a($),e&&a(y),e&&a(B),e&&a(W),e&&a(ne),P(V,e),e&&a(me),e&&a(pe),P(le),e&&a(ve),e&&a(we),e&&a(be),P(ue,e),e&&a(xe),e&&a(je),e&&a(Ee),P(ke,e),e&&a(ra),e&&a(wt),e&&a(oa),P(Xe,e),e&&a(ia),e&&a(ye),e&&a(pa),e&&a(De),P(Ze),e&&a(fa),P(et,e),e&&a(ma),e&&a(Oe),e&&a(ha),P(tt,e),e&&a(ua),P(at,e),e&&a(ca),e&&a(jt),e&&a(da),P(st,e),e&&a(ga),e&&a(Te),e&&a(_a),P(nt,e),e&&a($a),e&&a(Ae),e&&a(ka),e&&a(yt),e&&a(wa),P(rt,e),e&&a(ja),e&&a(de),e&&a(ya),P(it,e),e&&a(va),e&&a(vt),e&&a(ba),e&&a(Re),e&&a(xa),P(ft,e),e&&a(Ea),e&&a(Se),e&&a(Ta),P(mt,e),e&&a(Aa),e&&a(ge),e&&a(Ma),P(Ne,e),e&&a(Fa),e&&a(ze),P(ht),e&&a(qa),e&&a(Be),e&&a(Da),e&&a(Ce),P(ct),e&&a(za),P(He,e),e&&a(Ca),e&&a(Le),P(dt),e&&a(La),e&&a(Ue),e&&a(Pa),e&&a(Pe),P(_t),e&&a(Ia),P(Ke,e),e&&a(Oa),P(Qe,e)}}}const Pn={local:"language-modeling",sections:[{local:"load-eli5-dataset",title:"Load ELI5 dataset"},{local:"preprocess",title:"Preprocess"},{local:"causal-language-modeling",sections:[{local:"train",title:"Train"}],title:"Causal language modeling"},{local:"masked-language-modeling",sections:[{local:"train",title:"Train"}],title:"Masked language modeling"}],title:"Language modeling"};function In(Q){return gn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Wn extends hn{constructor(s){super();un(this,s,In,Ln,cn,{})}}export{Wn as default,Pn as metadata};
30
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/summarization.mdx-hf-doc-builder.js
import{S as ns,i as is,s as ls,e as l,k as g,w as j,t as o,M as ps,c as p,d as a,m as _,a as c,x as S,h as n,b,G as t,g as f,y as E,q as T,o as x,B as z,v as cs,L as os}from"../../chunks/vendor-hf-doc-builder.js";import{T as It}from"../../chunks/Tip-hf-doc-builder.js";import{Y as fs}from"../../chunks/Youtube-hf-doc-builder.js";import{I as Mt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ee}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as rs,M as Ot}from"../../chunks/Markdown-hf-doc-builder.js";function ms(F){let s,u,r,d,w;return{c(){s=l("p"),u=o("See the summarization "),r=l("a"),d=o("task page"),w=o(" for more information about its associated models, datasets, and metrics."),this.h()},l($){s=p($,"P",{});var v=c(s);u=n(v,"See the summarization "),r=p(v,"A",{href:!0,rel:!0});var C=c(r);d=n(C,"task page"),C.forEach(a),w=n(v," for more information about its associated models, datasets, and metrics."),v.forEach(a),this.h()},h(){b(r,"href","https://huggingface.co/tasks/summarization"),b(r,"rel","nofollow")},m($,v){f($,s,v),t(s,u),t(s,r),t(r,d),t(s,w)},d($){$&&a(s)}}}function hs(F){let s,u;return s=new ee({props:{code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p:os,i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function us(F){let s,u;return s=new Ot({props:{$$slots:{default:[hs]},$$scope:{ctx:F}}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p(r,d){const w={};d&2&&(w.$$scope={dirty:d,ctx:r}),s.$set(w)},i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function ds(F){let s,u;return s=new ee({props:{code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p:os,i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function gs(F){let s,u;return s=new Ot({props:{$$slots:{default:[ds]},$$scope:{ctx:F}}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p(r,d){const w={};d&2&&(w.$$scope={dirty:d,ctx:r}),s.$set(w)},i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function _s(F){let s,u,r,d,w,$,v,C;return{c(){s=l("p"),u=o("If you aren\u2019t familiar with fine-tuning a model with the "),r=l("a"),d=o("Trainer"),w=o(", take a look at the basic tutorial "),$=l("a"),v=o("here"),C=o("!"),this.h()},l(A){s=p(A,"P",{});var k=c(s);u=n(k,"If you aren\u2019t familiar with fine-tuning a model with the "),r=p(k,"A",{href:!0});var P=c(r);d=n(P,"Trainer"),P.forEach(a),w=n(k,", take a look at the basic tutorial "),$=p(k,"A",{href:!0});var 
L=c($);v=n(L,"here"),L.forEach(a),C=n(k,"!"),k.forEach(a),this.h()},h(){b(r,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),b($,"href","../training#finetune-with-trainer")},m(A,k){f(A,s,k),t(s,u),t(s,r),t(r,d),t(s,w),t(s,$),t($,v),t(s,C)},d(A){A&&a(s)}}}function $s(F){let s,u,r,d,w,$,v,C,A,k,P,L,J,M,R,G,O,le,Q,oe,U,pe,I,ce,B,fe,N,Z,W,ne,me,Y,K,D;return v=new ee({props:{code:`from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),A=new It({props:{$$slots:{default:[_s]},$$scope:{ctx:F}}}),K=new ee({props:{code:`training_args = Seq2SeqTrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, fp16=True, ) trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=tokenized_billsum["train"], eval_dataset=tokenized_billsum["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){s=l("p"),u=o("Load T5 with "),r=l("a"),d=o("AutoModelForSeq2SeqLM"),w=o(":"),$=g(),j(v.$$.fragment),C=g(),j(A.$$.fragment),k=g(),P=l("p"),L=o("At this point, only three steps remain:"),J=g(),M=l("ol"),R=l("li"),G=o("Define your training hyperparameters in "),O=l("a"),le=o("Seq2SeqTrainingArguments"),Q=o("."),oe=g(),U=l("li"),pe=o("Pass the training arguments to "),I=l("a"),ce=o("Seq2SeqTrainer"),B=o(" along with the model, dataset, tokenizer, and data collator."),fe=g(),N=l("li"),Z=o("Call "),W=l("a"),ne=o("train()"),me=o(" to fine-tune your model."),Y=g(),j(K.$$.fragment),this.h()},l(h){s=p(h,"P",{});var q=c(s);u=n(q,"Load T5 with "),r=p(q,"A",{href:!0});var X=c(r);d=n(X,"AutoModelForSeq2SeqLM"),X.forEach(a),w=n(q,":"),q.forEach(a),$=_(h),S(v.$$.fragment,h),C=_(h),S(A.$$.fragment,h),k=_(h),P=p(h,"P",{});var te=c(P);L=n(te,"At this point, only three steps remain:"),te.forEach(a),J=_(h),M=p(h,"OL",{});var H=c(M);R=p(H,"LI",{});var he=c(R);G=n(he,"Define your training hyperparameters in "),O=p(he,"A",{href:!0});var $e=c(O);le=n($e,"Seq2SeqTrainingArguments"),$e.forEach(a),Q=n(he,"."),he.forEach(a),oe=_(H),U=p(H,"LI",{});var V=c(U);pe=n(V,"Pass the training arguments to "),I=p(V,"A",{href:!0});var ae=c(I);ce=n(ae,"Seq2SeqTrainer"),ae.forEach(a),B=n(V," along with the model, dataset, tokenizer, and data collator."),V.forEach(a),fe=_(H),N=p(H,"LI",{});var se=c(N);Z=n(se,"Call "),W=p(se,"A",{href:!0});var i=c(W);ne=n(i,"train()"),i.forEach(a),me=n(se," to fine-tune your model."),se.forEach(a),H.forEach(a),Y=_(h),S(K.$$.fragment,h),this.h()},h(){b(r,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM"),b(O,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments"),b(I,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainer"),b(W,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(h,q){f(h,s,q),t(s,u),t(s,r),t(r,d),t(s,w),f(h,$,q),E(v,h,q),f(h,C,q),E(A,h,q),f(h,k,q),f(h,P,q),t(P,L),f(h,J,q),f(h,M,q),t(M,R),t(R,G),t(R,O),t(O,le),t(R,Q),t(M,oe),t(M,U),t(U,pe),t(U,I),t(I,ce),t(U,B),t(M,fe),t(M,N),t(N,Z),t(N,W),t(W,ne),t(N,me),f(h,Y,q),E(K,h,q),D=!0},p(h,q){const X={};q&2&&(X.$$scope={dirty:q,ctx:h}),A.$set(X)},i(h){D||(T(v.$$.fragment,h),T(A.$$.fragment,h),T(K.$$.fragment,h),D=!0)},o(h){x(v.$$.fragment,h),x(A.$$.fragment,h),x(K.$$.fragment,h),D=!1},d(h){h&&a(s),h&&a($),z(v,h),h&&a(C),z(A,h),h&&a(k),h&&a(P),h&&a(J),h&&a(M),h&&a(Y),z(K,h)}}}function bs(F){let s,u;return s=new Ot({props:{$$slots:{default:[$s]},$$scope:{ctx:F}}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p(r,d){const w={};d&2&&(w.$$scope={dirty:d,ctx:r}),s.$set(w)},i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function ws(F){let s,u,r,d,w;return{c(){s=l("p"),u=o("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),r=l("a"),d=o("here"),w=o("!"),this.h()},l($){s=p($,"P",{});var v=c(s);u=n(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),r=p(v,"A",{href:!0});var C=c(r);d=n(C,"here"),C.forEach(a),w=n(v,"!"),v.forEach(a),this.h()},h(){b(r,"href","training#finetune-with-keras")},m($,v){f($,s,v),t(s,u),t(s,r),t(r,d),t(s,w)},d($){$&&a(s)}}}function vs(F){let 
s,u,r,d,w,$,v,C,A,k,P,L,J,M,R,G,O,le,Q,oe,U,pe,I,ce,B,fe,N,Z,W,ne,me,Y,K,D,h,q,X,te,H,he,$e,V,ae,se;return k=new ee({props:{code:`tf_train_set = model.prepare_tf_dataset( tokenized_billsum["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = model.prepare_tf_dataset( tokenized_billsum["test"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),L=new It({props:{$$slots:{default:[ws]},$$scope:{ctx:F}}}),O=new ee({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),B=new ee({props:{code:`from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),D=new ee({props:{code:"model.compile(optimizer=optimizer)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)'}}),ae=new ee({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){s=l("p"),u=o("To fine-tune a model in TensorFlow, start by converting your datasets to the "),r=l("code"),d=o("tf.data.Dataset"),w=o(" format with "),$=l("a"),v=o("prepare_tf_dataset()"),C=o("."),A=g(),j(k.$$.fragment),P=g(),j(L.$$.fragment),J=g(),M=l("p"),R=o("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),G=g(),j(O.$$.fragment),le=g(),Q=l("p"),oe=o("Load T5 with "),U=l("a"),pe=o("TFAutoModelForSeq2SeqLM"),I=o(":"),ce=g(),j(B.$$.fragment),fe=g(),N=l("p"),Z=o("Configure the model for training with "),W=l("a"),ne=l("code"),me=o("compile"),Y=o(":"),K=g(),j(D.$$.fragment),h=g(),q=l("p"),X=o("Call "),te=l("a"),H=l("code"),he=o("fit"),$e=o(" to fine-tune the 
model:"),V=g(),j(ae.$$.fragment),this.h()},l(i){s=p(i,"P",{});var y=c(s);u=n(y,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),r=p(y,"CODE",{});var ue=c(r);d=n(ue,"tf.data.Dataset"),ue.forEach(a),w=n(y," format with "),$=p(y,"A",{href:!0});var He=c($);v=n(He,"prepare_tf_dataset()"),He.forEach(a),C=n(y,"."),y.forEach(a),A=_(i),S(k.$$.fragment,i),P=_(i),S(L.$$.fragment,i),J=_(i),M=p(i,"P",{});var Ae=c(M);R=n(Ae,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Ae.forEach(a),G=_(i),S(O.$$.fragment,i),le=_(i),Q=p(i,"P",{});var de=c(Q);oe=n(de,"Load T5 with "),U=p(de,"A",{href:!0});var Ce=c(U);pe=n(Ce,"TFAutoModelForSeq2SeqLM"),Ce.forEach(a),I=n(de,":"),de.forEach(a),ce=_(i),S(B.$$.fragment,i),fe=_(i),N=p(i,"P",{});var be=c(N);Z=n(be,"Configure the model for training with "),W=p(be,"A",{href:!0,rel:!0});var Ge=c(W);ne=p(Ge,"CODE",{});var Pe=c(ne);me=n(Pe,"compile"),Pe.forEach(a),Ge.forEach(a),Y=n(be,":"),be.forEach(a),K=_(i),S(D.$$.fragment,i),h=_(i),q=p(i,"P",{});var ge=c(q);X=n(ge,"Call "),te=p(ge,"A",{href:!0,rel:!0});var Fe=c(te);H=p(Fe,"CODE",{});var ie=c(H);he=n(ie,"fit"),ie.forEach(a),Fe.forEach(a),$e=n(ge," to fine-tune the model:"),ge.forEach(a),V=_(i),S(ae.$$.fragment,i),this.h()},h(){b($,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),b(U,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM"),b(W,"href","https://keras.io/api/models/model_training_apis/#compile-method"),b(W,"rel","nofollow"),b(te,"href","https://keras.io/api/models/model_training_apis/#fit-method"),b(te,"rel","nofollow")},m(i,y){f(i,s,y),t(s,u),t(s,r),t(r,d),t(s,w),t(s,$),t($,v),t(s,C),f(i,A,y),E(k,i,y),f(i,P,y),E(L,i,y),f(i,J,y),f(i,M,y),t(M,R),f(i,G,y),E(O,i,y),f(i,le,y),f(i,Q,y),t(Q,oe),t(Q,U),t(U,pe),t(Q,I),f(i,ce,y),E(B,i,y),f(i,fe,y),f(i,N,y),t(N,Z),t(N,W),t(W,ne),t(ne,me),t(N,Y),f(i,K,y),E(D,i,y),f(i,h,y),f(i,q,y),t(q,X),t(q,te),t(te,H),t(H,he),t(q,$e),f(i,V,y),E(ae,i,y),se=!0},p(i,y){const ue={};y&2&&(ue.$$scope={dirty:y,ctx:i}),L.$set(ue)},i(i){se||(T(k.$$.fragment,i),T(L.$$.fragment,i),T(O.$$.fragment,i),T(B.$$.fragment,i),T(D.$$.fragment,i),T(ae.$$.fragment,i),se=!0)},o(i){x(k.$$.fragment,i),x(L.$$.fragment,i),x(O.$$.fragment,i),x(B.$$.fragment,i),x(D.$$.fragment,i),x(ae.$$.fragment,i),se=!1},d(i){i&&a(s),i&&a(A),z(k,i),i&&a(P),z(L,i),i&&a(J),i&&a(M),i&&a(G),z(O,i),i&&a(le),i&&a(Q),i&&a(ce),z(B,i),i&&a(fe),i&&a(N),i&&a(K),z(D,i),i&&a(h),i&&a(q),i&&a(V),z(ae,i)}}}function ys(F){let s,u;return s=new Ot({props:{$$slots:{default:[vs]},$$scope:{ctx:F}}}),{c(){j(s.$$.fragment)},l(r){S(s.$$.fragment,r)},m(r,d){E(s,r,d),u=!0},p(r,d){const w={};d&2&&(w.$$scope={dirty:d,ctx:r}),s.$set(w)},i(r){u||(T(s.$$.fragment,r),u=!0)},o(r){x(s.$$.fragment,r),u=!1},d(r){z(s,r)}}}function ks(F){let s,u,r,d,w,$,v,C;return{c(){s=l("p"),u=o(`For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding `),r=l("a"),d=o("PyTorch notebook"),w=o(` or `),$=l("a"),v=o("TensorFlow notebook"),C=o("."),this.h()},l(A){s=p(A,"P",{});var k=c(s);u=n(k,`For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding `),r=p(k,"A",{href:!0,rel:!0});var P=c(r);d=n(P,"PyTorch notebook"),P.forEach(a),w=n(k,` or `),$=p(k,"A",{href:!0,rel:!0});var L=c($);v=n(L,"TensorFlow 
notebook"),L.forEach(a),C=n(k,"."),k.forEach(a),this.h()},h(){b(r,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb"),b(r,"rel","nofollow"),b($,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb"),b($,"rel","nofollow")},m(A,k){f(A,s,k),t(s,u),t(s,r),t(r,d),t(s,w),t(s,$),t($,v),t(s,C)},d(A){A&&a(s)}}}function qs(F){let s,u,r,d,w,$,v,C,A,k,P,L,J,M,R,G,O,le,Q,oe,U,pe,I,ce,B,fe,N,Z,W,ne,me,Y,K,D,h,q,X,te,H,he,$e,V,ae,se,i,y,ue,He,Ae,de,Ce,be,Ge,Pe,ge,Fe,ie,Nt,Ve,Bt,Ut,Ze,Wt,Ht,_t,ke,je,et,Le,Gt,tt,Yt,$t,we,Kt,at,Xt,Jt,st,Rt,Qt,bt,De,wt,Ye,Vt,vt,ve,rt,Zt,ea,Me,ta,ot,aa,sa,ra,Ie,oa,nt,na,ia,yt,Oe,kt,_e,la,Ne,pa,ca,it,fa,ma,lt,ha,ua,qt,Be,jt,re,da,Ke,ga,_a,pt,$a,ba,ct,wa,va,ft,ya,ka,St,Se,Et,qe,Ee,mt,Ue,qa,ht,ja,Tt,Te,xt,xe,zt;return $=new Mt({}),P=new fs({props:{id:"yHnr5Dk2zCI"}}),Y=new It({props:{$$slots:{default:[ms]},$$scope:{ctx:F}}}),X=new Mt({}),i=new ee({props:{code:`from datasets import load_dataset billsum = load_dataset("billsum", split="ca_test")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>billsum = load_dataset(<span class="hljs-string">&quot;billsum&quot;</span>, split=<span class="hljs-string">&quot;ca_test&quot;</span>)`}}),de=new ee({props:{code:"billsum = billsum.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>billsum = billsum.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),ge=new ee({props:{code:'billsum["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>billsum[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;summary&#x27;</span>: <span class="hljs-string">&#x27;Existing law authorizes state agencies to enter into contracts for the acquisition of goods or services upon approval by the Department of General Services. Existing law sets forth various requirements and prohibitions for those contracts, including, but not limited to, a prohibition on entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between spouses and domestic partners or same-sex and different-sex couples in the provision of benefits. Existing law provides that a contract entered into in violation of those requirements and prohibitions is void and authorizes the state or any person acting on behalf of the state to bring a civil action seeking a determination that a contract is in violation and therefore void. Under existing law, a willful violation of those requirements and prohibitions is a misdemeanor.\\nThis bill would also prohibit a state agency from entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between employees on the basis of gender identity in the provision of benefits, as specified. By expanding the scope of a crime, this bill would impose a state-mandated local program.\\nThe California Constitution requires the state to reimburse local agencies and school districts for certain costs mandated by the state. 
Statutory provisions establish procedures for making that reimbursement.\\nThis bill would provide that no reimbursement is required by this act for a specified reason.&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;The people of the State of California do enact as follows:\\n\\n\\nSECTION 1.\\nSection 10295.35 is added to the Public Contract Code, to read:\\n10295.35.\\n(a) (1) Notwithstanding any other law, a state agency shall not enter into any contract for the acquisition of goods or services in the amount of one hundred thousand dollars ($100,000) or more with a contractor that, in the provision of benefits, discriminates between employees on the basis of an employee\u2019s or dependent\u2019s actual or perceived gender identity, including, but not limited to, the employee\u2019s or dependent\u2019s identification as transgender.\\n(2) For purposes of this section, \u201Ccontract\u201D includes contracts with a cumulative amount of one hundred thousand dollars ($100,000) or more per contractor in each fiscal year.\\n(3) For purposes of this section, an employee health plan is discriminatory if the plan is not consistent with Section 1365.5 of the Health and Safety Code and Section 10140 of the Insurance Code.\\n(4) The requirements of this section shall apply only to those portions of a contractor\u2019s operations that occur under any of the following conditions:\\n(A) Within the state.\\n(B) On real property outside the state if the property is owned by the state or if the state has a right to occupy the property, and if the contractor\u2019s presence at that location is connected to a contract with the state.\\n(C) Elsewhere in the United States where work related to a state contract is being performed.\\n(b) Contractors shall treat as confidential, to the maximum extent allowed by law or by the requirement of the contractor\u2019s insurance provider, any request by an employee or applicant for employment benefits or any documentation of eligibility for benefits submitted by an employee or applicant for employment.\\n(c) After taking all reasonable measures to find a contractor that complies with this section, as determined by the state agency, the requirements of this section may be waived under any of the following circumstances:\\n(1) There is only one prospective contractor willing to enter into a specific contract with the state agency.\\n(2) The contract is necessary to respond to an emergency, as determined by the state agency, that endangers the public health, welfare, or safety, or the contract is necessary for the provision of essential services, and no entity that complies with the requirements of this section capable of responding to the emergency is immediately available.\\n(3) The requirements of this section violate, or are inconsistent with, the terms or conditions of a grant, subvention, or agreement, if the agency has made a good faith attempt to change the terms or conditions of any grant, subvention, or agreement to authorize application of this section.\\n(4) The contractor is providing wholesale or bulk water, power, or natural gas, the conveyance or transmission of the same, or ancillary services, as required for ensuring reliable services in accordance with good utility practice, if the purchase of the same cannot practically be accomplished through the standard competitive bidding procedures and the contractor is not providing direct retail services to end users.\\n(d) (1) A contractor shall not be deemed 
to discriminate in the provision of benefits if the contractor, in providing the benefits, pays the actual costs incurred in obtaining the benefit.\\n(2) If a contractor is unable to provide a certain benefit, despite taking reasonable measures to do so, the contractor shall not be deemed to discriminate in the provision of benefits.\\n(e) (1) Every contract subject to this chapter shall contain a statement by which the contractor certifies that the contractor is in compliance with this section.\\n(2) The department or other contracting agency shall enforce this section pursuant to its existing enforcement powers.\\n(3) (A) If a contractor falsely certifies that it is in compliance with this section, the contract with that contractor shall be subject to Article 9 (commencing with Section 10420), unless, within a time period specified by the department or other contracting agency, the contractor provides to the department or agency proof that it has complied, or is in the process of complying, with this section.\\n(B) The application of the remedies or penalties contained in Article 9 (commencing with Section 10420) to a contract subject to this chapter shall not preclude the application of any existing remedies otherwise available to the department or other contracting agency under its existing enforcement powers.\\n(f) Nothing in this section is intended to regulate the contracting practices of any local jurisdiction.\\n(g) This section shall be construed so as not to conflict with applicable federal laws, rules, or regulations. In the event that a court or agency of competent jurisdiction holds that federal law, rule, or regulation invalidates any clause, sentence, paragraph, or section of this code or the application thereof to any person or circumstances, it is the intent of the state that the court or agency sever that clause, sentence, paragraph, or section so that the remainder of this section shall remain in effect.\\nSEC. 2.\\nSection 10295.35 of the Public Contract Code shall not be construed to create any new enforcement authority or responsibility in the Department of General Services or any other contracting agency.\\nSEC. 
3.\\nNo reimbursement is required by this act pursuant to Section 6 of Article XIII\\u2009B of the California Constitution because the only costs that may be incurred by a local agency or school district will be incurred because this act creates a new crime or infraction, eliminates a crime or infraction, or changes the penalty for a crime or infraction, within the meaning of Section 17556 of the Government Code, or changes the definition of a crime within the meaning of Section 6 of Article XIII\\u2009B of the California Constitution.&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;An act to add Section 10295.35 to the Public Contract Code, relating to public contracts.&#x27;</span>}`}}),Le=new Mt({}),De=new ee({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),Oe=new ee({props:{code:`prefix = "summarize: " def preprocess_function(examples): inputs = [prefix + doc for doc in examples["text"]] model_inputs = tokenizer(inputs, max_length=1024, truncation=True) labels = tokenizer(text_target=examples["summary"], max_length=128, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;summarize: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> inputs = [prefix + doc <span class="hljs-keyword">for</span> doc <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;text&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">1024</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = tokenizer(text_target=examples[<span class="hljs-string">&quot;summary&quot;</span>], max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> model_inputs`}}),Be=new ee({props:{code:"tokenized_billsum = billsum.map(preprocess_function, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_billsum = billsum.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),Se=new rs({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[gs],pytorch:[us]},$$scope:{ctx:F}}}),Ue=new Mt({}),Te=new rs({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[ys],pytorch:[bs]},$$scope:{ctx:F}}}),xe=new It({props:{$$slots:{default:[ks]},$$scope:{ctx:F}}}),{c(){s=l("meta"),u=g(),r=l("h1"),d=l("a"),w=l("span"),j($.$$.fragment),v=g(),C=l("span"),A=o("Summarization"),k=g(),j(P.$$.fragment),L=g(),J=l("p"),M=o("Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. Summarization can be:"),R=g(),G=l("ul"),O=l("li"),le=o("Extractive: extract the most relevant information from a document."),Q=g(),oe=l("li"),U=o("Abstractive: generate new text that captures the most relevant information."),pe=g(),I=l("p"),ce=o("This guide will show you how to fine-tune "),B=l("a"),fe=o("T5"),N=o(" on the California state bill subset of the "),Z=l("a"),W=o("BillSum"),ne=o(" dataset for abstractive summarization."),me=g(),j(Y.$$.fragment),K=g(),D=l("h2"),h=l("a"),q=l("span"),j(X.$$.fragment),te=g(),H=l("span"),he=o("Load BillSum dataset"),$e=g(),V=l("p"),ae=o("Load the BillSum dataset from the \u{1F917} Datasets library:"),se=g(),j(i.$$.fragment),y=g(),ue=l("p"),He=o("Split this dataset into a train and test set:"),Ae=g(),j(de.$$.fragment),Ce=g(),be=l("p"),Ge=o("Then take a look at an example:"),Pe=g(),j(ge.$$.fragment),Fe=g(),ie=l("p"),Nt=o("The "),Ve=l("code"),Bt=o("text"),Ut=o(" field is the input and the "),Ze=l("code"),Wt=o("summary"),Ht=o(" field is the target."),_t=g(),ke=l("h2"),je=l("a"),et=l("span"),j(Le.$$.fragment),Gt=g(),tt=l("span"),Yt=o("Preprocess"),$t=g(),we=l("p"),Kt=o("Load the T5 tokenizer to process "),at=l("code"),Xt=o("text"),Jt=o(" and "),st=l("code"),Rt=o("summary"),Qt=o(":"),bt=g(),j(De.$$.fragment),wt=g(),Ye=l("p"),Vt=o("The preprocessing function needs to:"),vt=g(),ve=l("ol"),rt=l("li"),Zt=o("Prefix the input with a prompt so T5 knows this is a summarization task. Some models capable of multiple NLP tasks require prompting for specific tasks."),ea=g(),Me=l("li"),ta=o("Use the keyword "),ot=l("code"),aa=o("text_target"),sa=o(" argument when tokenizing labels."),ra=g(),Ie=l("li"),oa=o("Truncate sequences to be no longer than the maximum length set by the "),nt=l("code"),na=o("max_length"),ia=o(" parameter."),yt=g(),j(Oe.$$.fragment),kt=g(),_e=l("p"),la=o("Use \u{1F917} Datasets "),Ne=l("a"),pa=o("map"),ca=o(" function to apply the preprocessing function over the entire dataset. You can speed up the "),it=l("code"),fa=o("map"),ma=o(" function by setting "),lt=l("code"),ha=o("batched=True"),ua=o(" to process multiple elements of the dataset at once:"),qt=g(),j(Be.$$.fragment),jt=g(),re=l("p"),da=o("Use "),Ke=l("a"),ga=o("DataCollatorForSeq2Seq"),_a=o(" to create a batch of examples. It will also "),pt=l("em"),$a=o("dynamically pad"),ba=o(" your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),ct=l("code"),wa=o("tokenizer"),va=o(" function by setting "),ft=l("code"),ya=o("padding=True"),ka=o(", dynamic padding is more efficient."),St=g(),j(Se.$$.fragment),Et=g(),qe=l("h2"),Ee=l("a"),mt=l("span"),j(Ue.$$.fragment),qa=g(),ht=l("span"),ja=o("Train"),Tt=g(),j(Te.$$.fragment),xt=g(),j(xe.$$.fragment),this.h()},l(e){const m=ps('[data-svelte="svelte-1phssyn"]',document.head);s=p(m,"META",{name:!0,content:!0}),m.forEach(a),u=_(e),r=p(e,"H1",{class:!0});var We=c(r);d=p(We,"A",{id:!0,class:!0,href:!0});var ut=c(d);w=p(ut,"SPAN",{});var dt=c(w);S($.$$.fragment,dt),dt.forEach(a),ut.forEach(a),v=_(We),C=p(We,"SPAN",{});var gt=c(C);A=n(gt,"Summarization"),gt.forEach(a),We.forEach(a),k=_(e),S(P.$$.fragment,e),L=_(e),J=p(e,"P",{});var Sa=c(J);M=n(Sa,"Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. Summarization can be:"),Sa.forEach(a),R=_(e),G=p(e,"UL",{});var At=c(G);O=p(At,"LI",{});var Ea=c(O);le=n(Ea,"Extractive: extract the most relevant information from a document."),Ea.forEach(a),Q=_(At),oe=p(At,"LI",{});var Ta=c(oe);U=n(Ta,"Abstractive: generate new text that captures the most relevant information."),Ta.forEach(a),At.forEach(a),pe=_(e),I=p(e,"P",{});var Xe=c(I);ce=n(Xe,"This guide will show you how to fine-tune "),B=p(Xe,"A",{href:!0,rel:!0});var xa=c(B);fe=n(xa,"T5"),xa.forEach(a),N=n(Xe," on the California state bill subset of the "),Z=p(Xe,"A",{href:!0,rel:!0});var za=c(Z);W=n(za,"BillSum"),za.forEach(a),ne=n(Xe," dataset for abstractive summarization."),Xe.forEach(a),me=_(e),S(Y.$$.fragment,e),K=_(e),D=p(e,"H2",{class:!0});var Ct=c(D);h=p(Ct,"A",{id:!0,class:!0,href:!0});var Aa=c(h);q=p(Aa,"SPAN",{});var Ca=c(q);S(X.$$.fragment,Ca),Ca.forEach(a),Aa.forEach(a),te=_(Ct),H=p(Ct,"SPAN",{});var Pa=c(H);he=n(Pa,"Load BillSum dataset"),Pa.forEach(a),Ct.forEach(a),$e=_(e),V=p(e,"P",{});var Fa=c(V);ae=n(Fa,"Load the BillSum dataset from the \u{1F917} Datasets library:"),Fa.forEach(a),se=_(e),S(i.$$.fragment,e),y=_(e),ue=p(e,"P",{});var La=c(ue);He=n(La,"Split this dataset into a train and test set:"),La.forEach(a),Ae=_(e),S(de.$$.fragment,e),Ce=_(e),be=p(e,"P",{});var Da=c(be);Ge=n(Da,"Then take a look at an example:"),Da.forEach(a),Pe=_(e),S(ge.$$.fragment,e),Fe=_(e),ie=p(e,"P",{});var Je=c(ie);Nt=n(Je,"The "),Ve=p(Je,"CODE",{});var Ma=c(Ve);Bt=n(Ma,"text"),Ma.forEach(a),Ut=n(Je," field is the input and the "),Ze=p(Je,"CODE",{});var Ia=c(Ze);Wt=n(Ia,"summary"),Ia.forEach(a),Ht=n(Je," field is the target."),Je.forEach(a),_t=_(e),ke=p(e,"H2",{class:!0});var Pt=c(ke);je=p(Pt,"A",{id:!0,class:!0,href:!0});var Oa=c(je);et=p(Oa,"SPAN",{});var Na=c(et);S(Le.$$.fragment,Na),Na.forEach(a),Oa.forEach(a),Gt=_(Pt),tt=p(Pt,"SPAN",{});var Ba=c(tt);Yt=n(Ba,"Preprocess"),Ba.forEach(a),Pt.forEach(a),$t=_(e),we=p(e,"P",{});var Re=c(we);Kt=n(Re,"Load the T5 tokenizer to process "),at=p(Re,"CODE",{});var Ua=c(at);Xt=n(Ua,"text"),Ua.forEach(a),Jt=n(Re," and "),st=p(Re,"CODE",{});var Wa=c(st);Rt=n(Wa,"summary"),Wa.forEach(a),Qt=n(Re,":"),Re.forEach(a),bt=_(e),S(De.$$.fragment,e),wt=_(e),Ye=p(e,"P",{});var Ha=c(Ye);Vt=n(Ha,"The preprocessing function needs to:"),Ha.forEach(a),vt=_(e),ve=p(e,"OL",{});var Qe=c(ve);rt=p(Qe,"LI",{});var Ga=c(rt);Zt=n(Ga,"Prefix the input with a prompt so T5 knows this is a summarization task. 
Some models capable of multiple NLP tasks require prompting for specific tasks."),Ga.forEach(a),ea=_(Qe),Me=p(Qe,"LI",{});var Ft=c(Me);ta=n(Ft,"Use the keyword "),ot=p(Ft,"CODE",{});var Ya=c(ot);aa=n(Ya,"text_target"),Ya.forEach(a),sa=n(Ft," argument when tokenizing labels."),Ft.forEach(a),ra=_(Qe),Ie=p(Qe,"LI",{});var Lt=c(Ie);oa=n(Lt,"Truncate sequences to be no longer than the maximum length set by the "),nt=p(Lt,"CODE",{});var Ka=c(nt);na=n(Ka,"max_length"),Ka.forEach(a),ia=n(Lt," parameter."),Lt.forEach(a),Qe.forEach(a),yt=_(e),S(Oe.$$.fragment,e),kt=_(e),_e=p(e,"P",{});var ze=c(_e);la=n(ze,"Use \u{1F917} Datasets "),Ne=p(ze,"A",{href:!0,rel:!0});var Xa=c(Ne);pa=n(Xa,"map"),Xa.forEach(a),ca=n(ze," function to apply the preprocessing function over the entire dataset. You can speed up the "),it=p(ze,"CODE",{});var Ja=c(it);fa=n(Ja,"map"),Ja.forEach(a),ma=n(ze," function by setting "),lt=p(ze,"CODE",{});var Ra=c(lt);ha=n(Ra,"batched=True"),Ra.forEach(a),ua=n(ze," to process multiple elements of the dataset at once:"),ze.forEach(a),qt=_(e),S(Be.$$.fragment,e),jt=_(e),re=p(e,"P",{});var ye=c(re);da=n(ye,"Use "),Ke=p(ye,"A",{href:!0});var Qa=c(Ke);ga=n(Qa,"DataCollatorForSeq2Seq"),Qa.forEach(a),_a=n(ye," to create a batch of examples. It will also "),pt=p(ye,"EM",{});var Va=c(pt);$a=n(Va,"dynamically pad"),Va.forEach(a),ba=n(ye," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ct=p(ye,"CODE",{});var Za=c(ct);wa=n(Za,"tokenizer"),Za.forEach(a),va=n(ye," function by setting "),ft=p(ye,"CODE",{});var es=c(ft);ya=n(es,"padding=True"),es.forEach(a),ka=n(ye,", dynamic padding is more efficient."),ye.forEach(a),St=_(e),S(Se.$$.fragment,e),Et=_(e),qe=p(e,"H2",{class:!0});var Dt=c(qe);Ee=p(Dt,"A",{id:!0,class:!0,href:!0});var ts=c(Ee);mt=p(ts,"SPAN",{});var as=c(mt);S(Ue.$$.fragment,as),as.forEach(a),ts.forEach(a),qa=_(Dt),ht=p(Dt,"SPAN",{});var ss=c(ht);ja=n(ss,"Train"),ss.forEach(a),Dt.forEach(a),Tt=_(e),S(Te.$$.fragment,e),xt=_(e),S(xe.$$.fragment,e),this.h()},h(){b(s,"name","hf:doc:metadata"),b(s,"content",JSON.stringify(js)),b(d,"id","summarization"),b(d,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(d,"href","#summarization"),b(r,"class","relative group"),b(B,"href","https://huggingface.co/t5-small"),b(B,"rel","nofollow"),b(Z,"href","https://huggingface.co/datasets/billsum"),b(Z,"rel","nofollow"),b(h,"id","load-billsum-dataset"),b(h,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(h,"href","#load-billsum-dataset"),b(D,"class","relative group"),b(je,"id","preprocess"),b(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(je,"href","#preprocess"),b(ke,"class","relative group"),b(Ne,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),b(Ne,"rel","nofollow"),b(Ke,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq"),b(Ee,"id","train"),b(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),b(Ee,"href","#train"),b(qe,"class","relative group")},m(e,m){t(document.head,s),f(e,u,m),f(e,r,m),t(r,d),t(d,w),E($,w,null),t(r,v),t(r,C),t(C,A),f(e,k,m),E(P,e,m),f(e,L,m),f(e,J,m),t(J,M),f(e,R,m),f(e,G,m),t(G,O),t(O,le),t(G,Q),t(G,oe),t(oe,U),f(e,pe,m),f(e,I,m),t(I,ce),t(I,B),t(B,fe),t(I,N),t(I,Z),t(Z,W),t(I,ne),f(e,me,m),E(Y,e,m),f(e,K,m),f(e,D,m),t(D,h),t(h,q),E(X,q,null),t(D,te),t(D,H),t(H,he),f(e,$e,m),f(e,V,m),t(V,ae),f(e,se,m),E(i,e,m),f(e,y,m),f(e,ue,m),t(ue,He),f(e,Ae,m),E(de,e,m),f(e,Ce,m),f(e,be,m),t(be,Ge),f(e,Pe,m),E(ge,e,m),f(e,Fe,m),f(e,ie,m),t(ie,Nt),t(ie,Ve),t(Ve,Bt),t(ie,Ut),t(ie,Ze),t(Ze,Wt),t(ie,Ht),f(e,_t,m),f(e,ke,m),t(ke,je),t(je,et),E(Le,et,null),t(ke,Gt),t(ke,tt),t(tt,Yt),f(e,$t,m),f(e,we,m),t(we,Kt),t(we,at),t(at,Xt),t(we,Jt),t(we,st),t(st,Rt),t(we,Qt),f(e,bt,m),E(De,e,m),f(e,wt,m),f(e,Ye,m),t(Ye,Vt),f(e,vt,m),f(e,ve,m),t(ve,rt),t(rt,Zt),t(ve,ea),t(ve,Me),t(Me,ta),t(Me,ot),t(ot,aa),t(Me,sa),t(ve,ra),t(ve,Ie),t(Ie,oa),t(Ie,nt),t(nt,na),t(Ie,ia),f(e,yt,m),E(Oe,e,m),f(e,kt,m),f(e,_e,m),t(_e,la),t(_e,Ne),t(Ne,pa),t(_e,ca),t(_e,it),t(it,fa),t(_e,ma),t(_e,lt),t(lt,ha),t(_e,ua),f(e,qt,m),E(Be,e,m),f(e,jt,m),f(e,re,m),t(re,da),t(re,Ke),t(Ke,ga),t(re,_a),t(re,pt),t(pt,$a),t(re,ba),t(re,ct),t(ct,wa),t(re,va),t(re,ft),t(ft,ya),t(re,ka),f(e,St,m),E(Se,e,m),f(e,Et,m),f(e,qe,m),t(qe,Ee),t(Ee,mt),E(Ue,mt,null),t(qe,qa),t(qe,ht),t(ht,ja),f(e,Tt,m),E(Te,e,m),f(e,xt,m),E(xe,e,m),zt=!0},p(e,[m]){const We={};m&2&&(We.$$scope={dirty:m,ctx:e}),Y.$set(We);const ut={};m&2&&(ut.$$scope={dirty:m,ctx:e}),Se.$set(ut);const dt={};m&2&&(dt.$$scope={dirty:m,ctx:e}),Te.$set(dt);const gt={};m&2&&(gt.$$scope={dirty:m,ctx:e}),xe.$set(gt)},i(e){zt||(T($.$$.fragment,e),T(P.$$.fragment,e),T(Y.$$.fragment,e),T(X.$$.fragment,e),T(i.$$.fragment,e),T(de.$$.fragment,e),T(ge.$$.fragment,e),T(Le.$$.fragment,e),T(De.$$.fragment,e),T(Oe.$$.fragment,e),T(Be.$$.fragment,e),T(Se.$$.fragment,e),T(Ue.$$.fragment,e),T(Te.$$.fragment,e),T(xe.$$.fragment,e),zt=!0)},o(e){x($.$$.fragment,e),x(P.$$.fragment,e),x(Y.$$.fragment,e),x(X.$$.fragment,e),x(i.$$.fragment,e),x(de.$$.fragment,e),x(ge.$$.fragment,e),x(Le.$$.fragment,e),x(De.$$.fragment,e),x(Oe.$$.fragment,e),x(Be.$$.fragment,e),x(Se.$$.fragment,e),x(Ue.$$.fragment,e),x(Te.$$.fragment,e),x(xe.$$.fragment,e),zt=!1},d(e){a(s),e&&a(u),e&&a(r),z($),e&&a(k),z(P,e),e&&a(L),e&&a(J),e&&a(R),e&&a(G),e&&a(pe),e&&a(I),e&&a(me),z(Y,e),e&&a(K),e&&a(D),z(X),e&&a($e),e&&a(V),e&&a(se),z(i,e),e&&a(y),e&&a(ue),e&&a(Ae),z(de,e),e&&a(Ce),e&&a(be),e&&a(Pe),z(ge,e),e&&a(Fe),e&&a(ie),e&&a(_t),e&&a(ke),z(Le),e&&a($t),e&&a(we),e&&a(bt),z(De,e),e&&a(wt),e&&a(Ye),e&&a(vt),e&&a(ve),e&&a(yt),z(Oe,e),e&&a(kt),e&&a(_e),e&&a(qt),z(Be,e),e&&a(jt),e&&a(re),e&&a(St),z(Se,e),e&&a(Et),e&&a(qe),z(Ue),e&&a(Tt),z(Te,e),e&&a(xt),z(xe,e)}}}const js={local:"summarization",sections:[{local:"load-billsum-dataset",title:"Load BillSum dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Summarization"};function Ss(F){return cs(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ps extends ns{constructor(s){super();is(this,s,Ss,qs,ls,{})}}export{Ps as default,js as metadata};
31
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js
import{S as mn,i as dn,s as un,e as r,k as _,w as E,t as l,M as _n,c as i,d as t,m as g,a as p,x as y,h as o,b as j,G as e,g as f,y as T,q as z,o as q,B as C,v as gn,L as hn}from"../../chunks/vendor-hf-doc-builder.js";import{T as ht}from"../../chunks/Tip-hf-doc-builder.js";import{Y as cn}from"../../chunks/Youtube-hf-doc-builder.js";import{I as ft}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ss}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as fn,M as mt}from"../../chunks/Markdown-hf-doc-builder.js";function $n(S){let a,m,n,u,k;return{c(){a=r("p"),m=l("See the token classification "),n=r("a"),u=l("task page"),k=l(" for more information about other forms of token classification and their associated models, datasets, and metrics."),this.h()},l($){a=i($,"P",{});var w=p(a);m=o(w,"See the token classification "),n=i(w,"A",{href:!0,rel:!0});var D=p(n);u=o(D,"task page"),D.forEach(t),k=o(w," for more information about other forms of token classification and their associated models, datasets, and metrics."),w.forEach(t),this.h()},h(){j(n,"href","https://huggingface.co/tasks/token-classification"),j(n,"rel","nofollow")},m($,w){f($,a,w),e(a,m),e(a,n),e(n,u),e(a,k)},d($){$&&t(a)}}}function jn(S){let a,m;return a=new ss({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p:hn,i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function kn(S){let a,m;return a=new mt({props:{$$slots:{default:[jn]},$$scope:{ctx:S}}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p(n,u){const k={};u&2&&(k.$$scope={dirty:u,ctx:n}),a.$set(k)},i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function wn(S){let a,m;return a=new ss({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p:hn,i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function bn(S){let a,m;return a=new mt({props:{$$slots:{default:[wn]},$$scope:{ctx:S}}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p(n,u){const k={};u&2&&(k.$$scope={dirty:u,ctx:n}),a.$set(k)},i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function xn(S){let a,m,n,u,k,$,w,D;return{c(){a=r("p"),m=l("If you aren\u2019t familiar with fine-tuning a model with the "),n=r("a"),u=l("Trainer"),k=l(", take a look at the basic tutorial "),$=r("a"),w=l("here"),D=l("!"),this.h()},l(A){a=i(A,"P",{});var b=p(a);m=o(b,"If you aren\u2019t familiar with fine-tuning a model with the "),n=i(b,"A",{href:!0});var 
F=p(n);u=o(F,"Trainer"),F.forEach(t),k=o(b,", take a look at the basic tutorial "),$=i(b,"A",{href:!0});var O=p($);w=o(O,"here"),O.forEach(t),D=o(b,"!"),b.forEach(t),this.h()},h(){j(n,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),j($,"href","../training#finetune-with-trainer")},m(A,b){f(A,a,b),e(a,m),e(a,n),e(n,u),e(a,k),e(a,$),e($,w),e(a,D)},d(A){A&&t(a)}}}function vn(S){let a,m,n,u,k,$,w,D,A,b,F,O,V,I,Z,B,R,G,J,hs,L,ms,ts,is,N,ps,P,Q,M,Y,ds,as,X,H;return w=new ss({props:{code:`from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=14)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">14</span>)`}}),A=new ht({props:{$$slots:{default:[xn]},$$scope:{ctx:S}}}),X=new ss({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_wnut["train"], eval_dataset=tokenized_wnut["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){a=r("p"),m=l("Load DistilBERT with "),n=r("a"),u=l("AutoModelForTokenClassification"),k=l(" along with the number of expected labels:"),$=_(),E(w.$$.fragment),D=_(),E(A.$$.fragment),b=_(),F=r("p"),O=l("At this point, only three steps remain:"),V=_(),I=r("ol"),Z=r("li"),B=l("Define your training hyperparameters in "),R=r("a"),G=l("TrainingArguments"),J=l("."),hs=_(),L=r("li"),ms=l("Pass the training arguments to "),ts=r("a"),is=l("Trainer"),N=l(" along with the model, dataset, tokenizer, and data collator."),ps=_(),P=r("li"),Q=l("Call "),M=r("a"),Y=l("train()"),ds=l(" to fine-tune your model."),as=_(),E(X.$$.fragment),this.h()},l(d){a=i(d,"P",{});var v=p(a);m=o(v,"Load DistilBERT with "),n=i(v,"A",{href:!0});var ns=p(n);u=o(ns,"AutoModelForTokenClassification"),ns.forEach(t),k=o(v," along with the number of expected labels:"),v.forEach(t),$=g(d),y(w.$$.fragment,d),D=g(d),y(A.$$.fragment,d),b=g(d),F=i(d,"P",{});var U=p(F);O=o(U,"At this point, only three steps remain:"),U.forEach(t),V=g(d),I=i(d,"OL",{});var K=p(I);Z=i(K,"LI",{});var es=p(Z);B=o(es,"Define your training hyperparameters in "),R=i(es,"A",{href:!0});var gs=p(R);G=o(gs,"TrainingArguments"),gs.forEach(t),J=o(es,"."),es.forEach(t),hs=g(K),L=i(K,"LI",{});var ls=p(L);ms=o(ls,"Pass the training arguments to "),ts=i(ls,"A",{href:!0});var W=p(ts);is=o(W,"Trainer"),W.forEach(t),N=o(ls," along with the model, dataset, tokenizer, and data collator."),ls.forEach(t),ps=g(K),P=i(K,"LI",{});var os=p(P);Q=o(os,"Call "),M=i(os,"A",{href:!0});var c=p(M);Y=o(c,"train()"),c.forEach(t),ds=o(os," to fine-tune your model."),os.forEach(t),K.forEach(t),as=g(d),y(X.$$.fragment,d),this.h()},h(){j(n,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForTokenClassification"),j(R,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),j(ts,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),j(M,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(d,v){f(d,a,v),e(a,m),e(a,n),e(n,u),e(a,k),f(d,$,v),T(w,d,v),f(d,D,v),T(A,d,v),f(d,b,v),f(d,F,v),e(F,O),f(d,V,v),f(d,I,v),e(I,Z),e(Z,B),e(Z,R),e(R,G),e(Z,J),e(I,hs),e(I,L),e(L,ms),e(L,ts),e(ts,is),e(L,N),e(I,ps),e(I,P),e(P,Q),e(P,M),e(M,Y),e(P,ds),f(d,as,v),T(X,d,v),H=!0},p(d,v){const ns={};v&2&&(ns.$$scope={dirty:v,ctx:d}),A.$set(ns)},i(d){H||(z(w.$$.fragment,d),z(A.$$.fragment,d),z(X.$$.fragment,d),H=!0)},o(d){q(w.$$.fragment,d),q(A.$$.fragment,d),q(X.$$.fragment,d),H=!1},d(d){d&&t(a),d&&t($),C(w,d),d&&t(D),C(A,d),d&&t(b),d&&t(F),d&&t(V),d&&t(I),d&&t(as),C(X,d)}}}function En(S){let a,m;return a=new mt({props:{$$slots:{default:[vn]},$$scope:{ctx:S}}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p(n,u){const k={};u&2&&(k.$$scope={dirty:u,ctx:n}),a.$set(k)},i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function yn(S){let a,m,n,u,k;return{c(){a=r("p"),m=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),n=r("a"),u=l("here"),k=l("!"),this.h()},l($){a=i($,"P",{});var w=p(a);m=o(w,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),n=i(w,"A",{href:!0});var D=p(n);u=o(D,"here"),D.forEach(t),k=o(w,"!"),w.forEach(t),this.h()},h(){j(n,"href","training#finetune-with-keras")},m($,w){f($,a,w),e(a,m),e(a,n),e(n,u),e(a,k)},d($){$&&t(a)}}}function Tn(S){let 
a,m,n,u,k,$,w,D,A,b,F,O,V,I,Z,B,R,G,J,hs,L,ms,ts,is,N,ps,P,Q,M,Y,ds,as,X,H,d,v,ns,U,K,es,gs,ls,W,os;return b=new ss({props:{code:`tf_train_set = model.prepare_tf_dataset( tokenized_wnut["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = model.prepare_tf_dataset( tokenized_wnut["validation"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),O=new ht({props:{$$slots:{default:[yn]},$$scope:{ctx:S}}}),R=new ss({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 3 num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs optimizer, lr_schedule = create_optimizer( init_lr=2e-5, num_train_steps=num_train_steps, weight_decay_rate=0.01, num_warmup_steps=0, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... 
</span>)`}}),N=new ss({props:{code:`from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),H=new ss({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),W=new ss({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){a=r("p"),m=l("To fine-tune a model in TensorFlow, start by converting your datasets to the "),n=r("code"),u=l("tf.data.Dataset"),k=l(" format with "),$=r("a"),w=l("prepare_tf_dataset()"),D=l("."),A=_(),E(b.$$.fragment),F=_(),E(O.$$.fragment),V=_(),I=r("p"),Z=l("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),B=_(),E(R.$$.fragment),G=_(),J=r("p"),hs=l("Load DistilBERT with "),L=r("a"),ms=l("TFAutoModelForTokenClassification"),ts=l(" along with the number of expected labels:"),is=_(),E(N.$$.fragment),ps=_(),P=r("p"),Q=l("Configure the model for training with "),M=r("a"),Y=r("code"),ds=l("compile"),as=l(":"),X=_(),E(H.$$.fragment),d=_(),v=r("p"),ns=l("Call "),U=r("a"),K=r("code"),es=l("fit"),gs=l(" to fine-tune the model:"),ls=_(),E(W.$$.fragment),this.h()},l(c){a=i(c,"P",{});var x=p(a);m=o(x,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),n=i(x,"CODE",{});var us=p(n);u=o(us,"tf.data.Dataset"),us.forEach(t),k=o(x," format with "),$=i(x,"A",{href:!0});var Js=p($);w=o(Js,"prepare_tf_dataset()"),Js.forEach(t),D=o(x,"."),x.forEach(t),A=g(c),y(b.$$.fragment,c),F=g(c),y(O.$$.fragment,c),V=g(c),I=i(c,"P",{});var Qs=p(I);Z=o(Qs,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Qs.forEach(t),B=g(c),y(R.$$.fragment,c),G=g(c),J=i(c,"P",{});var Es=p(J);hs=o(Es,"Load DistilBERT with "),L=i(Es,"A",{href:!0});var $s=p(L);ms=o($s,"TFAutoModelForTokenClassification"),$s.forEach(t),ts=o(Es," along with the number of expected labels:"),Es.forEach(t),is=g(c),y(N.$$.fragment,c),ps=g(c),P=i(c,"P",{});var ys=p(P);Q=o(ys,"Configure the model for training with "),M=i(ys,"A",{href:!0,rel:!0});var cs=p(M);Y=i(cs,"CODE",{});var Xs=p(Y);ds=o(Xs,"compile"),Xs.forEach(t),cs.forEach(t),as=o(ys,":"),ys.forEach(t),X=g(c),y(H.$$.fragment,c),d=g(c),v=i(c,"P",{});var js=p(v);ns=o(js,"Call "),U=i(js,"A",{href:!0,rel:!0});var se=p(U);K=i(se,"CODE",{});var ee=p(K);es=o(ee,"fit"),ee.forEach(t),se.forEach(t),gs=o(js," to fine-tune the 
model:"),js.forEach(t),ls=g(c),y(W.$$.fragment,c),this.h()},h(){j($,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),j(L,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForTokenClassification"),j(M,"href","https://keras.io/api/models/model_training_apis/#compile-method"),j(M,"rel","nofollow"),j(U,"href","https://keras.io/api/models/model_training_apis/#fit-method"),j(U,"rel","nofollow")},m(c,x){f(c,a,x),e(a,m),e(a,n),e(n,u),e(a,k),e(a,$),e($,w),e(a,D),f(c,A,x),T(b,c,x),f(c,F,x),T(O,c,x),f(c,V,x),f(c,I,x),e(I,Z),f(c,B,x),T(R,c,x),f(c,G,x),f(c,J,x),e(J,hs),e(J,L),e(L,ms),e(J,ts),f(c,is,x),T(N,c,x),f(c,ps,x),f(c,P,x),e(P,Q),e(P,M),e(M,Y),e(Y,ds),e(P,as),f(c,X,x),T(H,c,x),f(c,d,x),f(c,v,x),e(v,ns),e(v,U),e(U,K),e(K,es),e(v,gs),f(c,ls,x),T(W,c,x),os=!0},p(c,x){const us={};x&2&&(us.$$scope={dirty:x,ctx:c}),O.$set(us)},i(c){os||(z(b.$$.fragment,c),z(O.$$.fragment,c),z(R.$$.fragment,c),z(N.$$.fragment,c),z(H.$$.fragment,c),z(W.$$.fragment,c),os=!0)},o(c){q(b.$$.fragment,c),q(O.$$.fragment,c),q(R.$$.fragment,c),q(N.$$.fragment,c),q(H.$$.fragment,c),q(W.$$.fragment,c),os=!1},d(c){c&&t(a),c&&t(A),C(b,c),c&&t(F),C(O,c),c&&t(V),c&&t(I),c&&t(B),C(R,c),c&&t(G),c&&t(J),c&&t(is),C(N,c),c&&t(ps),c&&t(P),c&&t(X),C(H,c),c&&t(d),c&&t(v),c&&t(ls),C(W,c)}}}function zn(S){let a,m;return a=new mt({props:{$$slots:{default:[Tn]},$$scope:{ctx:S}}}),{c(){E(a.$$.fragment)},l(n){y(a.$$.fragment,n)},m(n,u){T(a,n,u),m=!0},p(n,u){const k={};u&2&&(k.$$scope={dirty:u,ctx:n}),a.$set(k)},i(n){m||(z(a.$$.fragment,n),m=!0)},o(n){q(a.$$.fragment,n),m=!1},d(n){C(a,n)}}}function qn(S){let a,m,n,u,k,$,w,D;return{c(){a=r("p"),m=l(`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),n=r("a"),u=l("PyTorch notebook"),k=l(` or `),$=r("a"),w=l("TensorFlow notebook"),D=l("."),this.h()},l(A){a=i(A,"P",{});var b=p(a);m=o(b,`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),n=i(b,"A",{href:!0,rel:!0});var F=p(n);u=o(F,"PyTorch notebook"),F.forEach(t),k=o(b,` or `),$=i(b,"A",{href:!0,rel:!0});var O=p($);w=o(O,"TensorFlow notebook"),O.forEach(t),D=o(b,"."),b.forEach(t),this.h()},h(){j(n,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb"),j(n,"rel","nofollow"),j($,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb"),j($,"rel","nofollow")},m(A,b){f(A,a,b),e(a,m),e(a,n),e(n,u),e(a,k),e(a,$),e($,w),e(a,D)},d(A){A&&t(a)}}}function Cn(S){let a,m,n,u,k,$,w,D,A,b,F,O,V,I,Z,B,R,G,J,hs,L,ms,ts,is,N,ps,P,Q,M,Y,ds,as,X,H,d,v,ns,U,K,es,gs,ls,W,os,c,x,us,Js,Qs,Es,$s,ys,cs,Xs,js,se,ee,fe,dt,ut,Ie,ks,te,he,_t,gt,$t,ws,me,jt,kt,de,wt,bt,ue,xt,vt,Et,ae,_e,yt,Tt,Ne,Ts,qs,ge,Is,zt,$e,qt,Me,Ns,Be,Cs,Ct,je,At,Dt,Re,Ms,Ue,As,Pt,ke,Ft,St,We,Bs,Ye,bs,Ot,we,Lt,It,be,Nt,Mt,He,xs,Rs,Bt,Us,xe,Rt,Ut,Wt,_s,Yt,ve,Ht,Kt,Ee,Vt,Zt,ye,Gt,Jt,Qt,Ws,Xt,Te,sa,ea,Ke,ne,ta,Ve,Ys,Ze,fs,aa,Hs,na,la,ze,oa,ra,qe,ia,pa,Ge,Ks,Je,rs,ca,le,fa,ha,Ce,ma,da,Ae,ua,_a,De,ga,$a,Qe,Ds,Xe,zs,Ps,Pe,Vs,ja,Fe,ka,st,Fs,et,Ss,tt;return $=new ft({}),F=new cn({props:{id:"wVHdVlPScxA"}}),N=new ht({props:{$$slots:{default:[$n]},$$scope:{ctx:S}}}),Y=new ft({}),U=new ss({props:{code:`from datasets import load_dataset wnut = load_dataset("wnut_17")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span 
class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)`}}),W=new ss({props:{code:'wnut["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }`}}),$s=new ss({props:{code:`label_list = wnut["train"].features[f"ner_tags"].feature.names label_list`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span 
class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]`}}),Is=new ft({}),Ns=new cn({props:{id:"iY2AZYdZAr0"}}),Ms=new ss({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Bs=new ss({props:{code:`tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) tokens`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]`}}),Ys=new ss({props:{code:`def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in 
enumerate(examples[f"ner_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens to their respective word. previous_word_idx = None label_ids = [] for word_idx in word_ids: # Set the special tokens to -100. if word_idx is None: label_ids.append(-100) elif word_idx != previous_word_idx: # Only label the first token of a given word. label_ids.append(label[word_idx]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): <span class="hljs-meta">... </span> word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> <span class="hljs-meta">... </span> previous_word_idx = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> label_ids = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... </span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenized_inputs`}}),Ks=new ss({props:{code:"tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)'}}),Ds=new fn({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[bn],pytorch:[kn]},$$scope:{ctx:S}}}),Vs=new ft({}),Fs=new fn({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[zn],pytorch:[En]},$$scope:{ctx:S}}}),Ss=new ht({props:{$$slots:{default:[qn]},$$scope:{ctx:S}}}),{c(){a=r("meta"),m=_(),n=r("h1"),u=r("a"),k=r("span"),E($.$$.fragment),w=_(),D=r("span"),A=l("Token classification"),b=_(),E(F.$$.fragment),O=_(),V=r("p"),I=l("Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization."),Z=_(),B=r("p"),R=l("This guide will show you how to fine-tune "),G=r("a"),J=l("DistilBERT"),hs=l(" on the "),L=r("a"),ms=l("WNUT 17"),ts=l(" dataset to detect new entities."),is=_(),E(N.$$.fragment),ps=_(),P=r("h2"),Q=r("a"),M=r("span"),E(Y.$$.fragment),ds=_(),as=r("span"),X=l("Load WNUT 17 dataset"),H=_(),d=r("p"),v=l("Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),ns=_(),E(U.$$.fragment),K=_(),es=r("p"),gs=l("Then take a look at an example:"),ls=_(),E(W.$$.fragment),os=_(),c=r("p"),x=l("Each number in "),us=r("code"),Js=l("ner_tags"),Qs=l(" represents an entity. Convert the number to a label name for more information:"),Es=_(),E($s.$$.fragment),ys=_(),cs=r("p"),Xs=l("The "),js=r("code"),se=l("ner_tag"),ee=l(" describes an entity, such as a corporation, location, or person. The letter that prefixes each "),fe=r("code"),dt=l("ner_tag"),ut=l(" indicates the token position of the entity:"),Ie=_(),ks=r("ul"),te=r("li"),he=r("code"),_t=l("B-"),gt=l(" indicates the beginning of an entity."),$t=_(),ws=r("li"),me=r("code"),jt=l("I-"),kt=l(" indicates a token is contained inside the same entity (e.g., the "),de=r("code"),wt=l("State"),bt=l(` token is a part of an entity like `),ue=r("code"),xt=l("Empire State Building"),vt=l(")."),Et=_(),ae=r("li"),_e=r("code"),yt=l("0"),Tt=l(" indicates the token doesn\u2019t correspond to any entity."),Ne=_(),Ts=r("h2"),qs=r("a"),ge=r("span"),E(Is.$$.fragment),zt=_(),$e=r("span"),qt=l("Preprocess"),Me=_(),E(Ns.$$.fragment),Be=_(),Cs=r("p"),Ct=l("Load the DistilBERT tokenizer to process the "),je=r("code"),At=l("tokens"),Dt=l(":"),Re=_(),E(Ms.$$.fragment),Ue=_(),As=r("p"),Pt=l("Since the input has already been split into words, set "),ke=r("code"),Ft=l("is_split_into_words=True"),St=l(" to tokenize the words into subwords:"),We=_(),E(Bs.$$.fragment),Ye=_(),bs=r("p"),Ot=l("Adding the special tokens "),we=r("code"),Lt=l("[CLS]"),It=l(" and "),be=r("code"),Nt=l("[SEP]"),Mt=l(" and subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may be split into two subwords. 
You will need to realign the tokens and labels by:"),He=_(),xs=r("ol"),Rs=r("li"),Bt=l("Mapping all tokens to their corresponding word with the "),Us=r("a"),xe=r("code"),Rt=l("word_ids"),Ut=l(" method."),Wt=_(),_s=r("li"),Yt=l("Assigning the label "),ve=r("code"),Ht=l("-100"),Kt=l(" to the special tokens "),Ee=r("code"),Vt=l("[CLS]"),Zt=l(" and "),ye=r("code"),Gt=l("[SEP]"),Jt=l(` so the PyTorch loss function ignores them.`),Qt=_(),Ws=r("li"),Xt=l("Only labeling the first token of a given word. Assign "),Te=r("code"),sa=l("-100"),ea=l(" to other subtokens from the same word."),Ke=_(),ne=r("p"),ta=l("Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT\u2019s maximum input length::"),Ve=_(),E(Ys.$$.fragment),Ze=_(),fs=r("p"),aa=l("Use \u{1F917} Datasets "),Hs=r("a"),na=l("map"),la=l(" function to tokenize and align the labels over the entire dataset. You can speed up the "),ze=r("code"),oa=l("map"),ra=l(" function by setting "),qe=r("code"),ia=l("batched=True"),pa=l(" to process multiple elements of the dataset at once:"),Ge=_(),E(Ks.$$.fragment),Je=_(),rs=r("p"),ca=l("Use "),le=r("a"),fa=l("DataCollatorForTokenClassification"),ha=l(" to create a batch of examples. It will also "),Ce=r("em"),ma=l("dynamically pad"),da=l(" your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Ae=r("code"),ua=l("tokenizer"),_a=l(" function by setting "),De=r("code"),ga=l("padding=True"),$a=l(", dynamic padding is more efficient."),Qe=_(),E(Ds.$$.fragment),Xe=_(),zs=r("h2"),Ps=r("a"),Pe=r("span"),E(Vs.$$.fragment),ja=_(),Fe=r("span"),ka=l("Train"),st=_(),E(Fs.$$.fragment),et=_(),E(Ss.$$.fragment),this.h()},l(s){const h=_n('[data-svelte="svelte-1phssyn"]',document.head);a=i(h,"META",{name:!0,content:!0}),h.forEach(t),m=g(s),n=i(s,"H1",{class:!0});var Zs=p(n);u=i(Zs,"A",{id:!0,class:!0,href:!0});var Se=p(u);k=i(Se,"SPAN",{});var Oe=p(k);y($.$$.fragment,Oe),Oe.forEach(t),Se.forEach(t),w=g(Zs),D=i(Zs,"SPAN",{});var Le=p(D);A=o(Le,"Token classification"),Le.forEach(t),Zs.forEach(t),b=g(s),y(F.$$.fragment,s),O=g(s),V=i(s,"P",{});var xa=p(V);I=o(xa,"Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization."),xa.forEach(t),Z=g(s),B=i(s,"P",{});var oe=p(B);R=o(oe,"This guide will show you how to fine-tune "),G=i(oe,"A",{href:!0,rel:!0});var va=p(G);J=o(va,"DistilBERT"),va.forEach(t),hs=o(oe," on the "),L=i(oe,"A",{href:!0,rel:!0});var Ea=p(L);ms=o(Ea,"WNUT 17"),Ea.forEach(t),ts=o(oe," dataset to detect new entities."),oe.forEach(t),is=g(s),y(N.$$.fragment,s),ps=g(s),P=i(s,"H2",{class:!0});var at=p(P);Q=i(at,"A",{id:!0,class:!0,href:!0});var ya=p(Q);M=i(ya,"SPAN",{});var Ta=p(M);y(Y.$$.fragment,Ta),Ta.forEach(t),ya.forEach(t),ds=g(at),as=i(at,"SPAN",{});var za=p(as);X=o(za,"Load WNUT 17 dataset"),za.forEach(t),at.forEach(t),H=g(s),d=i(s,"P",{});var qa=p(d);v=o(qa,"Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),qa.forEach(t),ns=g(s),y(U.$$.fragment,s),K=g(s),es=i(s,"P",{});var Ca=p(es);gs=o(Ca,"Then take a look at an example:"),Ca.forEach(t),ls=g(s),y(W.$$.fragment,s),os=g(s),c=i(s,"P",{});var nt=p(c);x=o(nt,"Each number in "),us=i(nt,"CODE",{});var Aa=p(us);Js=o(Aa,"ner_tags"),Aa.forEach(t),Qs=o(nt," represents an entity. 
Convert the number to a label name for more information:"),nt.forEach(t),Es=g(s),y($s.$$.fragment,s),ys=g(s),cs=i(s,"P",{});var re=p(cs);Xs=o(re,"The "),js=i(re,"CODE",{});var Da=p(js);se=o(Da,"ner_tag"),Da.forEach(t),ee=o(re," describes an entity, such as a corporation, location, or person. The letter that prefixes each "),fe=i(re,"CODE",{});var Pa=p(fe);dt=o(Pa,"ner_tag"),Pa.forEach(t),ut=o(re," indicates the token position of the entity:"),re.forEach(t),Ie=g(s),ks=i(s,"UL",{});var ie=p(ks);te=i(ie,"LI",{});var wa=p(te);he=i(wa,"CODE",{});var Fa=p(he);_t=o(Fa,"B-"),Fa.forEach(t),gt=o(wa," indicates the beginning of an entity."),wa.forEach(t),$t=g(ie),ws=i(ie,"LI",{});var Gs=p(ws);me=i(Gs,"CODE",{});var Sa=p(me);jt=o(Sa,"I-"),Sa.forEach(t),kt=o(Gs," indicates a token is contained inside the same entity (e.g., the "),de=i(Gs,"CODE",{});var Oa=p(de);wt=o(Oa,"State"),Oa.forEach(t),bt=o(Gs,` token is a part of an entity like `),ue=i(Gs,"CODE",{});var La=p(ue);xt=o(La,"Empire State Building"),La.forEach(t),vt=o(Gs,")."),Gs.forEach(t),Et=g(ie),ae=i(ie,"LI",{});var ba=p(ae);_e=i(ba,"CODE",{});var Ia=p(_e);yt=o(Ia,"0"),Ia.forEach(t),Tt=o(ba," indicates the token doesn\u2019t correspond to any entity."),ba.forEach(t),ie.forEach(t),Ne=g(s),Ts=i(s,"H2",{class:!0});var lt=p(Ts);qs=i(lt,"A",{id:!0,class:!0,href:!0});var Na=p(qs);ge=i(Na,"SPAN",{});var Ma=p(ge);y(Is.$$.fragment,Ma),Ma.forEach(t),Na.forEach(t),zt=g(lt),$e=i(lt,"SPAN",{});var Ba=p($e);qt=o(Ba,"Preprocess"),Ba.forEach(t),lt.forEach(t),Me=g(s),y(Ns.$$.fragment,s),Be=g(s),Cs=i(s,"P",{});var ot=p(Cs);Ct=o(ot,"Load the DistilBERT tokenizer to process the "),je=i(ot,"CODE",{});var Ra=p(je);At=o(Ra,"tokens"),Ra.forEach(t),Dt=o(ot,":"),ot.forEach(t),Re=g(s),y(Ms.$$.fragment,s),Ue=g(s),As=i(s,"P",{});var rt=p(As);Pt=o(rt,"Since the input has already been split into words, set "),ke=i(rt,"CODE",{});var Ua=p(ke);Ft=o(Ua,"is_split_into_words=True"),Ua.forEach(t),St=o(rt," to tokenize the words into subwords:"),rt.forEach(t),We=g(s),y(Bs.$$.fragment,s),Ye=g(s),bs=i(s,"P",{});var pe=p(bs);Ot=o(pe,"Adding the special tokens "),we=i(pe,"CODE",{});var Wa=p(we);Lt=o(Wa,"[CLS]"),Wa.forEach(t),It=o(pe," and "),be=i(pe,"CODE",{});var Ya=p(be);Nt=o(Ya,"[SEP]"),Ya.forEach(t),Mt=o(pe," and subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may be split into two subwords. You will need to realign the tokens and labels by:"),pe.forEach(t),He=g(s),xs=i(s,"OL",{});var ce=p(xs);Rs=i(ce,"LI",{});var it=p(Rs);Bt=o(it,"Mapping all tokens to their corresponding word with the "),Us=i(it,"A",{href:!0,rel:!0});var Ha=p(Us);xe=i(Ha,"CODE",{});var Ka=p(xe);Rt=o(Ka,"word_ids"),Ka.forEach(t),Ha.forEach(t),Ut=o(it," method."),it.forEach(t),Wt=g(ce),_s=i(ce,"LI",{});var Os=p(_s);Yt=o(Os,"Assigning the label "),ve=i(Os,"CODE",{});var Va=p(ve);Ht=o(Va,"-100"),Va.forEach(t),Kt=o(Os," to the special tokens "),Ee=i(Os,"CODE",{});var Za=p(Ee);Vt=o(Za,"[CLS]"),Za.forEach(t),Zt=o(Os," and "),ye=i(Os,"CODE",{});var Ga=p(ye);Gt=o(Ga,"[SEP]"),Ga.forEach(t),Jt=o(Os,` so the PyTorch loss function ignores them.`),Os.forEach(t),Qt=g(ce),Ws=i(ce,"LI",{});var pt=p(Ws);Xt=o(pt,"Only labeling the first token of a given word. 
Assign "),Te=i(pt,"CODE",{});var Ja=p(Te);sa=o(Ja,"-100"),Ja.forEach(t),ea=o(pt," to other subtokens from the same word."),pt.forEach(t),ce.forEach(t),Ke=g(s),ne=i(s,"P",{});var Qa=p(ne);ta=o(Qa,"Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT\u2019s maximum input length::"),Qa.forEach(t),Ve=g(s),y(Ys.$$.fragment,s),Ze=g(s),fs=i(s,"P",{});var Ls=p(fs);aa=o(Ls,"Use \u{1F917} Datasets "),Hs=i(Ls,"A",{href:!0,rel:!0});var Xa=p(Hs);na=o(Xa,"map"),Xa.forEach(t),la=o(Ls," function to tokenize and align the labels over the entire dataset. You can speed up the "),ze=i(Ls,"CODE",{});var sn=p(ze);oa=o(sn,"map"),sn.forEach(t),ra=o(Ls," function by setting "),qe=i(Ls,"CODE",{});var en=p(qe);ia=o(en,"batched=True"),en.forEach(t),pa=o(Ls," to process multiple elements of the dataset at once:"),Ls.forEach(t),Ge=g(s),y(Ks.$$.fragment,s),Je=g(s),rs=i(s,"P",{});var vs=p(rs);ca=o(vs,"Use "),le=i(vs,"A",{href:!0});var tn=p(le);fa=o(tn,"DataCollatorForTokenClassification"),tn.forEach(t),ha=o(vs," to create a batch of examples. It will also "),Ce=i(vs,"EM",{});var an=p(Ce);ma=o(an,"dynamically pad"),an.forEach(t),da=o(vs," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Ae=i(vs,"CODE",{});var nn=p(Ae);ua=o(nn,"tokenizer"),nn.forEach(t),_a=o(vs," function by setting "),De=i(vs,"CODE",{});var ln=p(De);ga=o(ln,"padding=True"),ln.forEach(t),$a=o(vs,", dynamic padding is more efficient."),vs.forEach(t),Qe=g(s),y(Ds.$$.fragment,s),Xe=g(s),zs=i(s,"H2",{class:!0});var ct=p(zs);Ps=i(ct,"A",{id:!0,class:!0,href:!0});var on=p(Ps);Pe=i(on,"SPAN",{});var rn=p(Pe);y(Vs.$$.fragment,rn),rn.forEach(t),on.forEach(t),ja=g(ct),Fe=i(ct,"SPAN",{});var pn=p(Fe);ka=o(pn,"Train"),pn.forEach(t),ct.forEach(t),st=g(s),y(Fs.$$.fragment,s),et=g(s),y(Ss.$$.fragment,s),this.h()},h(){j(a,"name","hf:doc:metadata"),j(a,"content",JSON.stringify(An)),j(u,"id","token-classification"),j(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(u,"href","#token-classification"),j(n,"class","relative group"),j(G,"href","https://huggingface.co/distilbert-base-uncased"),j(G,"rel","nofollow"),j(L,"href","https://huggingface.co/datasets/wnut_17"),j(L,"rel","nofollow"),j(Q,"id","load-wnut-17-dataset"),j(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(Q,"href","#load-wnut-17-dataset"),j(P,"class","relative group"),j(qs,"id","preprocess"),j(qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(qs,"href","#preprocess"),j(Ts,"class","relative group"),j(Us,"href","https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids"),j(Us,"rel","nofollow"),j(Hs,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),j(Hs,"rel","nofollow"),j(le,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForTokenClassification"),j(Ps,"id","train"),j(Ps,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),j(Ps,"href","#train"),j(zs,"class","relative group")},m(s,h){e(document.head,a),f(s,m,h),f(s,n,h),e(n,u),e(u,k),T($,k,null),e(n,w),e(n,D),e(D,A),f(s,b,h),T(F,s,h),f(s,O,h),f(s,V,h),e(V,I),f(s,Z,h),f(s,B,h),e(B,R),e(B,G),e(G,J),e(B,hs),e(B,L),e(L,ms),e(B,ts),f(s,is,h),T(N,s,h),f(s,ps,h),f(s,P,h),e(P,Q),e(Q,M),T(Y,M,null),e(P,ds),e(P,as),e(as,X),f(s,H,h),f(s,d,h),e(d,v),f(s,ns,h),T(U,s,h),f(s,K,h),f(s,es,h),e(es,gs),f(s,ls,h),T(W,s,h),f(s,os,h),f(s,c,h),e(c,x),e(c,us),e(us,Js),e(c,Qs),f(s,Es,h),T($s,s,h),f(s,ys,h),f(s,cs,h),e(cs,Xs),e(cs,js),e(js,se),e(cs,ee),e(cs,fe),e(fe,dt),e(cs,ut),f(s,Ie,h),f(s,ks,h),e(ks,te),e(te,he),e(he,_t),e(te,gt),e(ks,$t),e(ks,ws),e(ws,me),e(me,jt),e(ws,kt),e(ws,de),e(de,wt),e(ws,bt),e(ws,ue),e(ue,xt),e(ws,vt),e(ks,Et),e(ks,ae),e(ae,_e),e(_e,yt),e(ae,Tt),f(s,Ne,h),f(s,Ts,h),e(Ts,qs),e(qs,ge),T(Is,ge,null),e(Ts,zt),e(Ts,$e),e($e,qt),f(s,Me,h),T(Ns,s,h),f(s,Be,h),f(s,Cs,h),e(Cs,Ct),e(Cs,je),e(je,At),e(Cs,Dt),f(s,Re,h),T(Ms,s,h),f(s,Ue,h),f(s,As,h),e(As,Pt),e(As,ke),e(ke,Ft),e(As,St),f(s,We,h),T(Bs,s,h),f(s,Ye,h),f(s,bs,h),e(bs,Ot),e(bs,we),e(we,Lt),e(bs,It),e(bs,be),e(be,Nt),e(bs,Mt),f(s,He,h),f(s,xs,h),e(xs,Rs),e(Rs,Bt),e(Rs,Us),e(Us,xe),e(xe,Rt),e(Rs,Ut),e(xs,Wt),e(xs,_s),e(_s,Yt),e(_s,ve),e(ve,Ht),e(_s,Kt),e(_s,Ee),e(Ee,Vt),e(_s,Zt),e(_s,ye),e(ye,Gt),e(_s,Jt),e(xs,Qt),e(xs,Ws),e(Ws,Xt),e(Ws,Te),e(Te,sa),e(Ws,ea),f(s,Ke,h),f(s,ne,h),e(ne,ta),f(s,Ve,h),T(Ys,s,h),f(s,Ze,h),f(s,fs,h),e(fs,aa),e(fs,Hs),e(Hs,na),e(fs,la),e(fs,ze),e(ze,oa),e(fs,ra),e(fs,qe),e(qe,ia),e(fs,pa),f(s,Ge,h),T(Ks,s,h),f(s,Je,h),f(s,rs,h),e(rs,ca),e(rs,le),e(le,fa),e(rs,ha),e(rs,Ce),e(Ce,ma),e(rs,da),e(rs,Ae),e(Ae,ua),e(rs,_a),e(rs,De),e(De,ga),e(rs,$a),f(s,Qe,h),T(Ds,s,h),f(s,Xe,h),f(s,zs,h),e(zs,Ps),e(Ps,Pe),T(Vs,Pe,null),e(zs,ja),e(zs,Fe),e(Fe,ka),f(s,st,h),T(Fs,s,h),f(s,et,h),T(Ss,s,h),tt=!0},p(s,[h]){const Zs={};h&2&&(Zs.$$scope={dirty:h,ctx:s}),N.$set(Zs);const Se={};h&2&&(Se.$$scope={dirty:h,ctx:s}),Ds.$set(Se);const Oe={};h&2&&(Oe.$$scope={dirty:h,ctx:s}),Fs.$set(Oe);const Le={};h&2&&(Le.$$scope={dirty:h,ctx:s}),Ss.$set(Le)},i(s){tt||(z($.$$.fragment,s),z(F.$$.fragment,s),z(N.$$.fragment,s),z(Y.$$.fragment,s),z(U.$$.fragment,s),z(W.$$.fragment,s),z($s.$$.fragment,s),z(Is.$$.fragment,s),z(Ns.$$.fragment,s),z(Ms.$$.fragment,s),z(Bs.$$.fragment,s),z(Ys.$$.fragment,s),z(Ks.$$.fragment,s),z(Ds.$$.fragment,s),z(Vs.$$.fragment,s),z(Fs.$$.fragment,s),z(Ss.$$.fragment,s),tt=!0)},o(s){q($.$$.fragment,s),q(F.$$.fragment,s),q(N.$$.fragment,s),q(Y.$$.fragment,s),q(U.$$.fragment,s),q(W.$$.fragment,s),q($s.$$.fragment,s),q(Is.$$.fragment,s),q(Ns.$$.fragment,s),q(Ms.$$.fragment,s),q(Bs.$$.fragment,s),q(Ys.$$.fragment,s),q(Ks.$$.fragment,s),q(Ds.$$.fragment,s),q(Vs.$$.fragment,s),q(Fs.$$.fragment,s),q(Ss.$$.fragment,s),tt=!1},d(s){t(a),s&&t(m),s&&t(n),C($),s&&t(b),C(F,s),s&&t(O),s&&t(V),s&&t(Z),s&&t(B),s&&t(is),C(N,s),s&&t(ps),s&&t(P),C(Y),s&&t(H),s&&t(d),s&&t(ns),C(U,s),s&&t(K),s&&t(es),s&&t(ls),C(W,s),s&&t(os),s&&t(c),s&&t(Es),C($s,s),s&&t(ys),s&&t(cs),s&&t(Ie),s&&t(ks),s&&t(Ne),s&&t(Ts),C(Is),s&&t(Me),C(Ns,s),s&&t(Be),s&&t(Cs),s&&t(Re),C(Ms,s),s&&t(Ue),s&&t(As),s&&t(We),C(Bs,s),s&&t(Ye),s&&t(bs),s&&t(He),s&&t(xs),s&&t(Ke),s&&t(ne),s&&t(Ve),C(Ys,s),s&&t(Ze),s&&t(fs),s&&t(Ge),C(Ks,s),s&&t(Je),s&&t(rs),s&&t(Qe),C(Ds,s),s&&t(Xe),s&&t(zs),C(Vs),s&&t(st),C(Fs,s),s&&t(et),C(Ss,s)}}}const An={local:"token-classification",sections:[{local:"load-wnut-17-dataset",title:"Load WNUT 17 dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Token 
classification"};function Dn(S){return gn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Nn extends mn{constructor(a){super();dn(this,a,Dn,Cn,un,{})}}export{Nn as default,An as metadata};
32
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/image_classification.mdx-hf-doc-builder.js
import{S as Ct,i as Pt,s as Dt,e as o,k as c,w as j,t as r,M as qt,c as n,d as a,m as u,a as i,x as y,h as l,b as d,G as t,g as p,y as k,q as E,o as x,B as T,v as Ft}from"../../chunks/vendor-hf-doc-builder.js";import{T as tt}from"../../chunks/Tip-hf-doc-builder.js";import{Y as It}from"../../chunks/Youtube-hf-doc-builder.js";import{I as Ia}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as H}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as zt,M as St}from"../../chunks/Markdown-hf-doc-builder.js";function Lt(O){let m,b,f,_,w;return{c(){m=o("p"),b=r("See the image classification "),f=o("a"),_=r("task page"),w=r(" for more information about its associated models, datasets, and metrics."),this.h()},l(g){m=n(g,"P",{});var $=i(m);b=l($,"See the image classification "),f=n($,"A",{href:!0,rel:!0});var A=i(f);_=l(A,"task page"),A.forEach(a),w=l($," for more information about its associated models, datasets, and metrics."),$.forEach(a),this.h()},h(){d(f,"href","https://huggingface.co/tasks/audio-classification"),d(f,"rel","nofollow")},m(g,$){p(g,m,$),t(m,b),t(m,f),t(f,_),t(m,w)},d(g){g&&a(m)}}}function Nt(O){let m,b,f,_,w,g,$,A;return{c(){m=o("p"),b=r("If you aren\u2019t familiar with fine-tuning a model with the "),f=o("a"),_=r("Trainer"),w=r(", take a look at the basic tutorial "),g=o("a"),$=r("here"),A=r("!"),this.h()},l(P){m=n(P,"P",{});var D=i(m);b=l(D,"If you aren\u2019t familiar with fine-tuning a model with the "),f=n(D,"A",{href:!0});var F=i(f);_=l(F,"Trainer"),F.forEach(a),w=l(D,", take a look at the basic tutorial "),g=n(D,"A",{href:!0});var V=i(g);$=l(V,"here"),V.forEach(a),A=l(D,"!"),D.forEach(a),this.h()},h(){d(f,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),d(g,"href","../training#finetune-with-trainer")},m(P,D){p(P,m,D),t(m,b),t(m,f),t(f,_),t(m,w),t(m,g),t(g,$),t(m,A)},d(P){P&&a(m)}}}function Mt(O){let m,b,f,_,w,g,$,A,P,D,F,V,G,I,C,z,ee,J,De,ne,Y,qe,ie,ge,R,ae,L,U,W,K,Fe,pe,Q,_e,N,Ie,$e,X,B,te,se,ve,Z,me,S,be;return $=new H({props:{code:`from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(labels), id2label=id2label, label2id=label2id, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-meta">... </span> num_labels=<span class="hljs-built_in">len</span>(labels), <span class="hljs-meta">... </span> id2label=id2label, <span class="hljs-meta">... </span> label2id=label2id, <span class="hljs-meta">... 
</span>)`}}),P=new tt({props:{$$slots:{default:[Nt]},$$scope:{ctx:O}}}),S=new H({props:{code:`training_args = TrainingArguments( output_dir="./results", per_device_train_batch_size=16, evaluation_strategy="steps", num_train_epochs=4, fp16=True, save_steps=100, eval_steps=100, logging_steps=10, learning_rate=2e-4, save_total_limit=2, remove_unused_columns=False, ) trainer = Trainer( model=model, args=training_args, data_collator=data_collator, train_dataset=food["train"], eval_dataset=food["test"], tokenizer=feature_extractor, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> save_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> eval_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> logging_steps=<span class="hljs-number">10</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-4</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> remove_unused_columns=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span> train_dataset=food[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=food[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){m=o("p"),b=r("Load ViT with "),f=o("a"),_=r("AutoModelForImageClassification"),w=r(". Specify the number of labels, and pass the model the mapping between label number and label class:"),g=c(),j($.$$.fragment),A=c(),j(P.$$.fragment),D=c(),F=o("p"),V=r("At this point, only three steps remain:"),G=c(),I=o("ol"),C=o("li"),z=r("Define your training hyperparameters in "),ee=o("a"),J=r("TrainingArguments"),De=r(". It is important you don\u2019t remove unused columns because this will drop the "),ne=o("code"),Y=r("image"),qe=r(" column. Without the "),ie=o("code"),ge=r("image"),R=r(" column, you can\u2019t create "),ae=o("code"),L=r("pixel_values"),U=r(". Set "),W=o("code"),K=r("remove_unused_columns=False"),Fe=r(" to prevent this behavior!"),pe=c(),Q=o("li"),_e=r("Pass the training arguments to "),N=o("a"),Ie=r("Trainer"),$e=r(" along with the model, datasets, tokenizer, and data collator."),X=c(),B=o("li"),te=r("Call "),se=o("a"),ve=r("train()"),Z=r(" to fine-tune your model."),me=c(),j(S.$$.fragment),this.h()},l(h){m=n(h,"P",{});var v=i(m);b=l(v,"Load ViT with "),f=n(v,"A",{href:!0});var re=i(f);_=l(re,"AutoModelForImageClassification"),re.forEach(a),w=l(v,". 
Specify the number of labels, and pass the model the mapping between label number and label class:"),v.forEach(a),g=u(h),y($.$$.fragment,h),A=u(h),y(P.$$.fragment,h),D=u(h),F=n(h,"P",{});var M=i(F);V=l(M,"At this point, only three steps remain:"),M.forEach(a),G=u(h),I=n(h,"OL",{});var le=i(I);C=n(le,"LI",{});var q=i(C);z=l(q,"Define your training hyperparameters in "),ee=n(q,"A",{href:!0});var Ge=i(ee);J=l(Ge,"TrainingArguments"),Ge.forEach(a),De=l(q,". It is important you don\u2019t remove unused columns because this will drop the "),ne=n(q,"CODE",{});var Je=i(ne);Y=l(Je,"image"),Je.forEach(a),qe=l(q," column. Without the "),ie=n(q,"CODE",{});var we=i(ie);ge=l(we,"image"),we.forEach(a),R=l(q," column, you can\u2019t create "),ae=n(q,"CODE",{});var Ye=i(ae);L=l(Ye,"pixel_values"),Ye.forEach(a),U=l(q,". Set "),W=n(q,"CODE",{});var We=i(W);K=l(We,"remove_unused_columns=False"),We.forEach(a),Fe=l(q," to prevent this behavior!"),q.forEach(a),pe=u(le),Q=n(le,"LI",{});var fe=i(Q);_e=l(fe,"Pass the training arguments to "),N=n(fe,"A",{href:!0});var oe=i(N);Ie=l(oe,"Trainer"),oe.forEach(a),$e=l(fe," along with the model, datasets, tokenizer, and data collator."),fe.forEach(a),X=u(le),B=n(le,"LI",{});var he=i(B);te=l(he,"Call "),se=n(he,"A",{href:!0});var ce=i(se);ve=l(ce,"train()"),ce.forEach(a),Z=l(he," to fine-tune your model."),he.forEach(a),le.forEach(a),me=u(h),y(S.$$.fragment,h),this.h()},h(){d(f,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForImageClassification"),d(ee,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),d(N,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),d(se,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(h,v){p(h,m,v),t(m,b),t(m,f),t(f,_),t(m,w),p(h,g,v),k($,h,v),p(h,A,v),k(P,h,v),p(h,D,v),p(h,F,v),t(F,V),p(h,G,v),p(h,I,v),t(I,C),t(C,z),t(C,ee),t(ee,J),t(C,De),t(C,ne),t(ne,Y),t(C,qe),t(C,ie),t(ie,ge),t(C,R),t(C,ae),t(ae,L),t(C,U),t(C,W),t(W,K),t(C,Fe),t(I,pe),t(I,Q),t(Q,_e),t(Q,N),t(N,Ie),t(Q,$e),t(I,X),t(I,B),t(B,te),t(B,se),t(se,ve),t(B,Z),p(h,me,v),k(S,h,v),be=!0},p(h,v){const re={};v&2&&(re.$$scope={dirty:v,ctx:h}),P.$set(re)},i(h){be||(E($.$$.fragment,h),E(P.$$.fragment,h),E(S.$$.fragment,h),be=!0)},o(h){x($.$$.fragment,h),x(P.$$.fragment,h),x(S.$$.fragment,h),be=!1},d(h){h&&a(m),h&&a(g),T($,h),h&&a(A),T(P,h),h&&a(D),h&&a(F),h&&a(G),h&&a(I),h&&a(me),T(S,h)}}}function Ot(O){let m,b;return m=new St({props:{$$slots:{default:[Mt]},$$scope:{ctx:O}}}),{c(){j(m.$$.fragment)},l(f){y(m.$$.fragment,f)},m(f,_){k(m,f,_),b=!0},p(f,_){const w={};_&2&&(w.$$scope={dirty:_,ctx:f}),m.$set(w)},i(f){b||(E(m.$$.fragment,f),b=!0)},o(f){x(m.$$.fragment,f),b=!1},d(f){T(m,f)}}}function Rt(O){let m,b,f,_,w;return{c(){m=o("p"),b=r("For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding "),f=o("a"),_=r("PyTorch notebook"),w=r("."),this.h()},l(g){m=n(g,"P",{});var $=i(m);b=l($,"For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding "),f=n($,"A",{href:!0,rel:!0});var A=i(f);_=l(A,"PyTorch notebook"),A.forEach(a),w=l($,"."),$.forEach(a),this.h()},h(){d(f,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb"),d(f,"rel","nofollow")},m(g,$){p(g,m,$),t(m,b),t(m,f),t(f,_),t(m,w)},d(g){g&&a(m)}}}function Ut(O){let 
m,b,f,_,w,g,$,A,P,D,F,V,G,I,C,z,ee,J,De,ne,Y,qe,ie,ge,R,ae,L,U,W,K,Fe,pe,Q,_e,N,Ie,$e,X,B,te,se,ve,Z,me,S,be,h,v,re,M,le,q,Ge,Je,we,Ye,We,fe,oe,he,ce,za,pa,ze,ma,je,Sa,ea,La,Na,fa,ue,ye,aa,Se,Ma,ta,Oa,ha,Ke,Ra,ca,Le,ua,ke,Ua,Ne,sa,Ba,Ha,da,Me,ga,Ee,Va,ra,Ga,Ja,_a,Oe,$a,xe,Ya,Re,Wa,Ka,va,Ue,ba,Te,Qa,Qe,Xa,Za,wa,Be,ja,de,Ae,la,He,et,oa,at,ya,Ce,ka,Pe,Ea;return g=new Ia({}),F=new It({props:{id:"tjAIM7BOYhw"}}),R=new tt({props:{$$slots:{default:[Lt]},$$scope:{ctx:O}}}),K=new Ia({}),X=new H({props:{code:`from datasets import load_dataset food = load_dataset("food101", split="train[:5000]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>food = load_dataset(<span class="hljs-string">&quot;food101&quot;</span>, split=<span class="hljs-string">&quot;train[:5000]&quot;</span>)`}}),Z=new H({props:{code:"food = food.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>food = food.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),v=new H({props:{code:'food["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>food[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at <span class="hljs-number">0x7F52AFC8AC50</span>&gt;, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">79</span>}`}}),oe=new H({props:{code:`labels = food["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = food[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... 
</span> id2label[<span class="hljs-built_in">str</span>(i)] = label`}}),ze=new H({props:{code:"id2label[str(79)]",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">79</span>)] <span class="hljs-string">&#x27;prime_rib&#x27;</span>`}}),Se=new Ia({}),Le=new H({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>)`}}),Me=new H({props:{code:`from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) _transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> RandomResizedCrop, Compose, Normalize, ToTensor <span class="hljs-meta">&gt;&gt;&gt; </span>normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) <span class="hljs-meta">&gt;&gt;&gt; </span>_transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])`}}),Oe=new H({props:{code:`def transforms(examples): examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] del examples["image"] return examples`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">transforms</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;pixel_values&quot;</span>] = [_transforms(img.convert(<span class="hljs-string">&quot;RGB&quot;</span>)) <span class="hljs-keyword">for</span> img <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> <span class="hljs-keyword">del</span> examples[<span class="hljs-string">&quot;image&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> examples`}}),Ue=new H({props:{code:"food = food.with_transform(transforms)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>food = food.with_transform(transforms)'}}),Be=new H({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()`}}),He=new Ia({}),Ce=new zt({props:{pytorch:!0,tensorflow:!1,jax:!1,$$slots:{pytorch:[Ot]},$$scope:{ctx:O}}}),Pe=new tt({props:{$$slots:{default:[Rt]},$$scope:{ctx:O}}}),{c(){m=o("meta"),b=c(),f=o("h1"),_=o("a"),w=o("span"),j(g.$$.fragment),$=c(),A=o("span"),P=r("Image classification"),D=c(),j(F.$$.fragment),V=c(),G=o("p"),I=r("Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. 
There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease."),C=c(),z=o("p"),ee=r("This guide will show you how to fine-tune "),J=o("a"),De=r("ViT"),ne=r(" on the "),Y=o("a"),qe=r("Food-101"),ie=r(" dataset to classify a food item in an image."),ge=c(),j(R.$$.fragment),ae=c(),L=o("h2"),U=o("a"),W=o("span"),j(K.$$.fragment),Fe=c(),pe=o("span"),Q=r("Load Food-101 dataset"),_e=c(),N=o("p"),Ie=r("Load only the first 5000 images of the Food-101 dataset from the \u{1F917} Datasets library since it is pretty large:"),$e=c(),j(X.$$.fragment),B=c(),te=o("p"),se=r("Split this dataset into a train and test set:"),ve=c(),j(Z.$$.fragment),me=c(),S=o("p"),be=r("Then take a look at an example:"),h=c(),j(v.$$.fragment),re=c(),M=o("p"),le=r("The "),q=o("code"),Ge=r("image"),Je=r(" field contains a PIL image, and each "),we=o("code"),Ye=r("label"),We=r(" is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number:"),fe=c(),j(oe.$$.fragment),he=c(),ce=o("p"),za=r("Now you can convert the label number to a label name for more information:"),pa=c(),j(ze.$$.fragment),ma=c(),je=o("p"),Sa=r("Each food class - or label - corresponds to a number; "),ea=o("code"),La=r("79"),Na=r(" indicates a prime rib in the example above."),fa=c(),ue=o("h2"),ye=o("a"),aa=o("span"),j(Se.$$.fragment),Ma=c(),ta=o("span"),Oa=r("Preprocess"),ha=c(),Ke=o("p"),Ra=r("Load the ViT feature extractor to process the image into a tensor:"),ca=c(),j(Le.$$.fragment),ua=c(),ke=o("p"),Ua=r("Apply several image transformations to the dataset to make the model more robust against overfitting. Here you\u2019ll use torchvision\u2019s "),Ne=o("a"),sa=o("code"),Ba=r("transforms"),Ha=r(" module. Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:"),da=c(),j(Me.$$.fragment),ga=c(),Ee=o("p"),Va=r("Create a preprocessing function that will apply the transforms and return the "),ra=o("code"),Ga=r("pixel_values"),Ja=r(" - the inputs to the model - of the image:"),_a=c(),j(Oe.$$.fragment),$a=c(),xe=o("p"),Ya=r("Use \u{1F917} Dataset\u2019s "),Re=o("a"),Wa=r("with_transform"),Ka=r(" method to apply the transforms over the entire dataset. The transforms are applied on-the-fly when you load an element of the dataset:"),va=c(),j(Ue.$$.fragment),ba=c(),Te=o("p"),Qa=r("Use "),Qe=o("a"),Xa=r("DefaultDataCollator"),Za=r(" to create a batch of examples. Unlike other data collators in \u{1F917} Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding."),wa=c(),j(Be.$$.fragment),ja=c(),de=o("h2"),Ae=o("a"),la=o("span"),j(He.$$.fragment),et=c(),oa=o("span"),at=r("Train"),ya=c(),j(Ce.$$.fragment),ka=c(),j(Pe.$$.fragment),this.h()},l(e){const s=qt('[data-svelte="svelte-1phssyn"]',document.head);m=n(s,"META",{name:!0,content:!0}),s.forEach(a),b=u(e),f=n(e,"H1",{class:!0});var Ve=i(f);_=n(Ve,"A",{id:!0,class:!0,href:!0});var na=i(_);w=n(na,"SPAN",{});var ia=i(w);y(g.$$.fragment,ia),ia.forEach(a),na.forEach(a),$=u(Ve),A=n(Ve,"SPAN",{});var st=i(A);P=l(st,"Image classification"),st.forEach(a),Ve.forEach(a),D=u(e),y(F.$$.fragment,e),V=u(e),G=n(e,"P",{});var rt=i(G);I=l(rt,"Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. 
There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease."),rt.forEach(a),C=u(e),z=n(e,"P",{});var Xe=i(z);ee=l(Xe,"This guide will show you how to fine-tune "),J=n(Xe,"A",{href:!0,rel:!0});var lt=i(J);De=l(lt,"ViT"),lt.forEach(a),ne=l(Xe," on the "),Y=n(Xe,"A",{href:!0,rel:!0});var ot=i(Y);qe=l(ot,"Food-101"),ot.forEach(a),ie=l(Xe," dataset to classify a food item in an image."),Xe.forEach(a),ge=u(e),y(R.$$.fragment,e),ae=u(e),L=n(e,"H2",{class:!0});var xa=i(L);U=n(xa,"A",{id:!0,class:!0,href:!0});var nt=i(U);W=n(nt,"SPAN",{});var it=i(W);y(K.$$.fragment,it),it.forEach(a),nt.forEach(a),Fe=u(xa),pe=n(xa,"SPAN",{});var pt=i(pe);Q=l(pt,"Load Food-101 dataset"),pt.forEach(a),xa.forEach(a),_e=u(e),N=n(e,"P",{});var mt=i(N);Ie=l(mt,"Load only the first 5000 images of the Food-101 dataset from the \u{1F917} Datasets library since it is pretty large:"),mt.forEach(a),$e=u(e),y(X.$$.fragment,e),B=u(e),te=n(e,"P",{});var ft=i(te);se=l(ft,"Split this dataset into a train and test set:"),ft.forEach(a),ve=u(e),y(Z.$$.fragment,e),me=u(e),S=n(e,"P",{});var ht=i(S);be=l(ht,"Then take a look at an example:"),ht.forEach(a),h=u(e),y(v.$$.fragment,e),re=u(e),M=n(e,"P",{});var Ze=i(M);le=l(Ze,"The "),q=n(Ze,"CODE",{});var ct=i(q);Ge=l(ct,"image"),ct.forEach(a),Je=l(Ze," field contains a PIL image, and each "),we=n(Ze,"CODE",{});var ut=i(we);Ye=l(ut,"label"),ut.forEach(a),We=l(Ze," is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number:"),Ze.forEach(a),fe=u(e),y(oe.$$.fragment,e),he=u(e),ce=n(e,"P",{});var dt=i(ce);za=l(dt,"Now you can convert the label number to a label name for more information:"),dt.forEach(a),pa=u(e),y(ze.$$.fragment,e),ma=u(e),je=n(e,"P",{});var Ta=i(je);Sa=l(Ta,"Each food class - or label - corresponds to a number; "),ea=n(Ta,"CODE",{});var gt=i(ea);La=l(gt,"79"),gt.forEach(a),Na=l(Ta," indicates a prime rib in the example above."),Ta.forEach(a),fa=u(e),ue=n(e,"H2",{class:!0});var Aa=i(ue);ye=n(Aa,"A",{id:!0,class:!0,href:!0});var _t=i(ye);aa=n(_t,"SPAN",{});var $t=i(aa);y(Se.$$.fragment,$t),$t.forEach(a),_t.forEach(a),Ma=u(Aa),ta=n(Aa,"SPAN",{});var vt=i(ta);Oa=l(vt,"Preprocess"),vt.forEach(a),Aa.forEach(a),ha=u(e),Ke=n(e,"P",{});var bt=i(Ke);Ra=l(bt,"Load the ViT feature extractor to process the image into a tensor:"),bt.forEach(a),ca=u(e),y(Le.$$.fragment,e),ua=u(e),ke=n(e,"P",{});var Ca=i(ke);Ua=l(Ca,"Apply several image transformations to the dataset to make the model more robust against overfitting. Here you\u2019ll use torchvision\u2019s "),Ne=n(Ca,"A",{href:!0,rel:!0});var wt=i(Ne);sa=n(wt,"CODE",{});var jt=i(sa);Ba=l(jt,"transforms"),jt.forEach(a),wt.forEach(a),Ha=l(Ca," module. Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:"),Ca.forEach(a),da=u(e),y(Me.$$.fragment,e),ga=u(e),Ee=n(e,"P",{});var Pa=i(Ee);Va=l(Pa,"Create a preprocessing function that will apply the transforms and return the "),ra=n(Pa,"CODE",{});var yt=i(ra);Ga=l(yt,"pixel_values"),yt.forEach(a),Ja=l(Pa," - the inputs to the model - of the image:"),Pa.forEach(a),_a=u(e),y(Oe.$$.fragment,e),$a=u(e),xe=n(e,"P",{});var Da=i(xe);Ya=l(Da,"Use \u{1F917} Dataset\u2019s "),Re=n(Da,"A",{href:!0,rel:!0});var kt=i(Re);Wa=l(kt,"with_transform"),kt.forEach(a),Ka=l(Da," method to apply the transforms over the entire dataset. 
The transforms are applied on-the-fly when you load an element of the dataset:"),Da.forEach(a),va=u(e),y(Ue.$$.fragment,e),ba=u(e),Te=n(e,"P",{});var qa=i(Te);Qa=l(qa,"Use "),Qe=n(qa,"A",{href:!0});var Et=i(Qe);Xa=l(Et,"DefaultDataCollator"),Et.forEach(a),Za=l(qa," to create a batch of examples. Unlike other data collators in \u{1F917} Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding."),qa.forEach(a),wa=u(e),y(Be.$$.fragment,e),ja=u(e),de=n(e,"H2",{class:!0});var Fa=i(de);Ae=n(Fa,"A",{id:!0,class:!0,href:!0});var xt=i(Ae);la=n(xt,"SPAN",{});var Tt=i(la);y(He.$$.fragment,Tt),Tt.forEach(a),xt.forEach(a),et=u(Fa),oa=n(Fa,"SPAN",{});var At=i(oa);at=l(At,"Train"),At.forEach(a),Fa.forEach(a),ya=u(e),y(Ce.$$.fragment,e),ka=u(e),y(Pe.$$.fragment,e),this.h()},h(){d(m,"name","hf:doc:metadata"),d(m,"content",JSON.stringify(Bt)),d(_,"id","image-classification"),d(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_,"href","#image-classification"),d(f,"class","relative group"),d(J,"href","https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit"),d(J,"rel","nofollow"),d(Y,"href","https://huggingface.co/datasets/food101"),d(Y,"rel","nofollow"),d(U,"id","load-food101-dataset"),d(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(U,"href","#load-food101-dataset"),d(L,"class","relative group"),d(ye,"id","preprocess"),d(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ye,"href","#preprocess"),d(ue,"class","relative group"),d(Ne,"href","https://pytorch.org/vision/stable/transforms.html"),d(Ne,"rel","nofollow"),d(Re,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.with_transform"),d(Re,"rel","nofollow"),d(Qe,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DefaultDataCollator"),d(Ae,"id","train"),d(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ae,"href","#train"),d(de,"class","relative 
group")},m(e,s){t(document.head,m),p(e,b,s),p(e,f,s),t(f,_),t(_,w),k(g,w,null),t(f,$),t(f,A),t(A,P),p(e,D,s),k(F,e,s),p(e,V,s),p(e,G,s),t(G,I),p(e,C,s),p(e,z,s),t(z,ee),t(z,J),t(J,De),t(z,ne),t(z,Y),t(Y,qe),t(z,ie),p(e,ge,s),k(R,e,s),p(e,ae,s),p(e,L,s),t(L,U),t(U,W),k(K,W,null),t(L,Fe),t(L,pe),t(pe,Q),p(e,_e,s),p(e,N,s),t(N,Ie),p(e,$e,s),k(X,e,s),p(e,B,s),p(e,te,s),t(te,se),p(e,ve,s),k(Z,e,s),p(e,me,s),p(e,S,s),t(S,be),p(e,h,s),k(v,e,s),p(e,re,s),p(e,M,s),t(M,le),t(M,q),t(q,Ge),t(M,Je),t(M,we),t(we,Ye),t(M,We),p(e,fe,s),k(oe,e,s),p(e,he,s),p(e,ce,s),t(ce,za),p(e,pa,s),k(ze,e,s),p(e,ma,s),p(e,je,s),t(je,Sa),t(je,ea),t(ea,La),t(je,Na),p(e,fa,s),p(e,ue,s),t(ue,ye),t(ye,aa),k(Se,aa,null),t(ue,Ma),t(ue,ta),t(ta,Oa),p(e,ha,s),p(e,Ke,s),t(Ke,Ra),p(e,ca,s),k(Le,e,s),p(e,ua,s),p(e,ke,s),t(ke,Ua),t(ke,Ne),t(Ne,sa),t(sa,Ba),t(ke,Ha),p(e,da,s),k(Me,e,s),p(e,ga,s),p(e,Ee,s),t(Ee,Va),t(Ee,ra),t(ra,Ga),t(Ee,Ja),p(e,_a,s),k(Oe,e,s),p(e,$a,s),p(e,xe,s),t(xe,Ya),t(xe,Re),t(Re,Wa),t(xe,Ka),p(e,va,s),k(Ue,e,s),p(e,ba,s),p(e,Te,s),t(Te,Qa),t(Te,Qe),t(Qe,Xa),t(Te,Za),p(e,wa,s),k(Be,e,s),p(e,ja,s),p(e,de,s),t(de,Ae),t(Ae,la),k(He,la,null),t(de,et),t(de,oa),t(oa,at),p(e,ya,s),k(Ce,e,s),p(e,ka,s),k(Pe,e,s),Ea=!0},p(e,[s]){const Ve={};s&2&&(Ve.$$scope={dirty:s,ctx:e}),R.$set(Ve);const na={};s&2&&(na.$$scope={dirty:s,ctx:e}),Ce.$set(na);const ia={};s&2&&(ia.$$scope={dirty:s,ctx:e}),Pe.$set(ia)},i(e){Ea||(E(g.$$.fragment,e),E(F.$$.fragment,e),E(R.$$.fragment,e),E(K.$$.fragment,e),E(X.$$.fragment,e),E(Z.$$.fragment,e),E(v.$$.fragment,e),E(oe.$$.fragment,e),E(ze.$$.fragment,e),E(Se.$$.fragment,e),E(Le.$$.fragment,e),E(Me.$$.fragment,e),E(Oe.$$.fragment,e),E(Ue.$$.fragment,e),E(Be.$$.fragment,e),E(He.$$.fragment,e),E(Ce.$$.fragment,e),E(Pe.$$.fragment,e),Ea=!0)},o(e){x(g.$$.fragment,e),x(F.$$.fragment,e),x(R.$$.fragment,e),x(K.$$.fragment,e),x(X.$$.fragment,e),x(Z.$$.fragment,e),x(v.$$.fragment,e),x(oe.$$.fragment,e),x(ze.$$.fragment,e),x(Se.$$.fragment,e),x(Le.$$.fragment,e),x(Me.$$.fragment,e),x(Oe.$$.fragment,e),x(Ue.$$.fragment,e),x(Be.$$.fragment,e),x(He.$$.fragment,e),x(Ce.$$.fragment,e),x(Pe.$$.fragment,e),Ea=!1},d(e){a(m),e&&a(b),e&&a(f),T(g),e&&a(D),T(F,e),e&&a(V),e&&a(G),e&&a(C),e&&a(z),e&&a(ge),T(R,e),e&&a(ae),e&&a(L),T(K),e&&a(_e),e&&a(N),e&&a($e),T(X,e),e&&a(B),e&&a(te),e&&a(ve),T(Z,e),e&&a(me),e&&a(S),e&&a(h),T(v,e),e&&a(re),e&&a(M),e&&a(fe),T(oe,e),e&&a(he),e&&a(ce),e&&a(pa),T(ze,e),e&&a(ma),e&&a(je),e&&a(fa),e&&a(ue),T(Se),e&&a(ha),e&&a(Ke),e&&a(ca),T(Le,e),e&&a(ua),e&&a(ke),e&&a(da),T(Me,e),e&&a(ga),e&&a(Ee),e&&a(_a),T(Oe,e),e&&a($a),e&&a(xe),e&&a(va),T(Ue,e),e&&a(ba),e&&a(Te),e&&a(wa),T(Be,e),e&&a(ja),e&&a(de),T(He),e&&a(ya),T(Ce,e),e&&a(ka),T(Pe,e)}}}const Bt={local:"image-classification",sections:[{local:"load-food101-dataset",title:"Load Food-101 dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Image classification"};function Ht(O){return Ft(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Qt extends Ct{constructor(m){super();Pt(this,m,Ht,Ut,Dt,{})}}export{Qt as default,Bt as metadata};
33
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/audio_classification.mdx-hf-doc-builder.js
import{S as Ot,i as Ft,s as Wt,e as r,k as h,w,t,M as Nt,c as o,d as s,m as u,a as i,x,h as n,b as d,G as a,g as p,y as E,q as y,o as k,B as A,v as Rt}from"../../chunks/vendor-hf-doc-builder.js";import{T as Xs}from"../../chunks/Tip-hf-doc-builder.js";import{Y as Vt}from"../../chunks/Youtube-hf-doc-builder.js";import{I as Xa}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as W}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as zt,M as Yt}from"../../chunks/Markdown-hf-doc-builder.js";function Ut(N){let c,b,m,g,v;return{c(){c=r("p"),b=t("See the audio classification "),m=r("a"),g=t("task page"),v=t(" for more information about its associated models, datasets, and metrics."),this.h()},l(_){c=o(_,"P",{});var $=i(c);b=n($,"See the audio classification "),m=o($,"A",{href:!0,rel:!0});var q=i(m);g=n(q,"task page"),q.forEach(s),v=n($," for more information about its associated models, datasets, and metrics."),$.forEach(s),this.h()},h(){d(m,"href","https://huggingface.co/tasks/audio-classification"),d(m,"rel","nofollow")},m(_,$){p(_,c,$),a(c,b),a(c,m),a(m,g),a(c,v)},d(_){_&&s(c)}}}function Ht(N){let c,b,m,g,v,_,$,q;return{c(){c=r("p"),b=t("If you aren\u2019t familiar with fine-tuning a model with the "),m=r("a"),g=t("Trainer"),v=t(", take a look at the basic tutorial "),_=r("a"),$=t("here"),q=t("!"),this.h()},l(T){c=o(T,"P",{});var P=i(c);b=n(P,"If you aren\u2019t familiar with fine-tuning a model with the "),m=o(P,"A",{href:!0});var D=i(m);g=n(D,"Trainer"),D.forEach(s),v=n(P,", take a look at the basic tutorial "),_=o(P,"A",{href:!0});var B=i(_);$=n(B,"here"),B.forEach(s),q=n(P,"!"),P.forEach(s),this.h()},h(){d(m,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),d(_,"href","../training#finetune-with-trainer")},m(T,P){p(T,c,P),a(c,b),a(c,m),a(m,g),a(c,v),a(c,_),a(_,$),a(c,q)},d(T){T&&s(c)}}}function Bt(N){let c,b,m,g,v,_,$,q,T,P,D,B,G,M,R,L,X,J,je,we,I,xe,Z,me,V,he,S,z,Y,K,Ee,ee,Q,re;return $=new W({props:{code:`from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer num_labels = len(id2label) model = AutoModelForAudioClassification.from_pretrained( "facebook/wav2vec2-base", num_labels=num_labels, label2id=label2id, id2label=id2label )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForAudioClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, num_labels=num_labels, label2id=label2id, id2label=id2label <span class="hljs-meta">... </span>)`}}),T=new Xs({props:{$$slots:{default:[Ht]},$$scope:{ctx:N}}}),Q=new W({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=3e-5, num_train_epochs=5, ) trainer = Trainer( model=model, args=training_args, train_dataset=encoded_minds["train"], eval_dataset=encoded_minds["test"], tokenizer=feature_extractor, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... 
</span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> save_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">3e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=encoded_minds[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=encoded_minds[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){c=r("p"),b=t("Load Wav2Vec2 with "),m=r("a"),g=t("AutoModelForAudioClassification"),v=t(". Specify the number of labels, and pass the model the mapping between label number and label class:"),_=h(),w($.$$.fragment),q=h(),w(T.$$.fragment),P=h(),D=r("p"),B=t("At this point, only three steps remain:"),G=h(),M=r("ol"),R=r("li"),L=t("Define your training hyperparameters in "),X=r("a"),J=t("TrainingArguments"),je=t("."),we=h(),I=r("li"),xe=t("Pass the training arguments to "),Z=r("a"),me=t("Trainer"),V=t(" along with the model, datasets, and feature extractor."),he=h(),S=r("li"),z=t("Call "),Y=r("a"),K=t("train()"),Ee=t(" to fine-tune your model."),ee=h(),w(Q.$$.fragment),this.h()},l(f){c=o(f,"P",{});var j=i(c);b=n(j,"Load Wav2Vec2 with "),m=o(j,"A",{href:!0});var F=i(m);g=n(F,"AutoModelForAudioClassification"),F.forEach(s),v=n(j,". 
Specify the number of labels, and pass the model the mapping between label number and label class:"),j.forEach(s),_=u(f),x($.$$.fragment,f),q=u(f),x(T.$$.fragment,f),P=u(f),D=o(f,"P",{});var Ye=i(D);B=n(Ye,"At this point, only three steps remain:"),Ye.forEach(s),G=u(f),M=o(f,"OL",{});var ae=i(M);R=o(ae,"LI",{});var oe=i(R);L=n(oe,"Define your training hyperparameters in "),X=o(oe,"A",{href:!0});var se=i(X);J=n(se,"TrainingArguments"),se.forEach(s),je=n(oe,"."),oe.forEach(s),we=u(ae),I=o(ae,"LI",{});var ie=i(I);xe=n(ie,"Pass the training arguments to "),Z=o(ie,"A",{href:!0});var pe=i(Z);me=n(pe,"Trainer"),pe.forEach(s),V=n(ie," along with the model, datasets, and feature extractor."),ie.forEach(s),he=u(ae),S=o(ae,"LI",{});var ue=i(S);z=n(ue,"Call "),Y=o(ue,"A",{href:!0});var ye=i(Y);K=n(ye,"train()"),ye.forEach(s),Ee=n(ue," to fine-tune your model."),ue.forEach(s),ae.forEach(s),ee=u(f),x(Q.$$.fragment,f),this.h()},h(){d(m,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForAudioClassification"),d(X,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),d(Z,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),d(Y,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(f,j){p(f,c,j),a(c,b),a(c,m),a(m,g),a(c,v),p(f,_,j),E($,f,j),p(f,q,j),E(T,f,j),p(f,P,j),p(f,D,j),a(D,B),p(f,G,j),p(f,M,j),a(M,R),a(R,L),a(R,X),a(X,J),a(R,je),a(M,we),a(M,I),a(I,xe),a(I,Z),a(Z,me),a(I,V),a(M,he),a(M,S),a(S,z),a(S,Y),a(Y,K),a(S,Ee),p(f,ee,j),E(Q,f,j),re=!0},p(f,j){const F={};j&2&&(F.$$scope={dirty:j,ctx:f}),T.$set(F)},i(f){re||(y($.$$.fragment,f),y(T.$$.fragment,f),y(Q.$$.fragment,f),re=!0)},o(f){k($.$$.fragment,f),k(T.$$.fragment,f),k(Q.$$.fragment,f),re=!1},d(f){f&&s(c),f&&s(_),A($,f),f&&s(q),A(T,f),f&&s(P),f&&s(D),f&&s(G),f&&s(M),f&&s(ee),A(Q,f)}}}function Gt(N){let c,b;return c=new Yt({props:{$$slots:{default:[Bt]},$$scope:{ctx:N}}}),{c(){w(c.$$.fragment)},l(m){x(c.$$.fragment,m)},m(m,g){E(c,m,g),b=!0},p(m,g){const v={};g&2&&(v.$$scope={dirty:g,ctx:m}),c.$set(v)},i(m){b||(y(c.$$.fragment,m),b=!0)},o(m){k(c.$$.fragment,m),b=!1},d(m){A(c,m)}}}function Jt(N){let c,b,m,g,v;return{c(){c=r("p"),b=t("For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding "),m=r("a"),g=t("PyTorch notebook"),v=t("."),this.h()},l(_){c=o(_,"P",{});var $=i(c);b=n($,"For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding "),m=o($,"A",{href:!0,rel:!0});var q=i(m);g=n(q,"PyTorch notebook"),q.forEach(s),v=n($,"."),$.forEach(s),this.h()},h(){d(m,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb"),d(m,"rel","nofollow")},m(_,$){p(_,c,$),a(c,b),a(c,m),a(m,g),a(c,v)},d(_){_&&s(c)}}}function Kt(N){let c,b,m,g,v,_,$,q,T,P,D,B,G,M,R,L,X,J,je,we,I,xe,Z,me,V,he,S,z,Y,K,Ee,ee,Q,re,f,j,F,Ye,ae,oe,se,ie,pe,ue,ye,ke,va,Ue,Za,ja,Ae,wa,O,es,Ze,as,ss,ea,ts,ns,aa,ls,rs,sa,os,is,xa,qe,Ea,He,ps,ya,Te,ka,U,cs,ta,fs,ms,na,hs,us,la,ds,_s,Aa,Pe,qa,Be,gs,Ta,De,Pa,te,$s,ra,bs,vs,oa,js,ws,Da,ce,de,ia,Se,xs,pa,Es,Sa,Ge,ys,Ca,Ce,Ia,_e,ks,Ie,As,qs,Ma,Me,La,Je,Ts,Oa,ne,Le,Ps,ca,Ds,Ss,Cs,Oe,Is,Fe,Ms,Ls,Os,fa,Fs,Fa,We,Wa,C,Ws,Ne,Ns,Rs,ma,Vs,zs,ha,Ys,Us,ua,Hs,Bs,da,Gs,Js,Na,Re,Ra,fe,ge,_a,Ve,Ks,ga,Qs,Va,$e,za,be,Ya;return _=new Xa({}),D=new Vt({props:{id:"KWwzcmG98Ds"}}),V=new Xs({props:{$$slots:{default:[Ut]},$$scope:{ctx:N}}}),K=new Xa({}),se=new 
W({props:{code:`from datasets import load_dataset, Audio minds = load_dataset("PolyAI/minds14", name="en-US", split="train")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>minds = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)`}}),ke=new W({props:{code:"minds = minds.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),Ae=new W({props:{code:"minds",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds DatasetDict({ train: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">450</span> }) test: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">113</span> }) })`}}),qe=new W({props:{code:'minds = minds.remove_columns(["path", "transcription", "english_transcription", "lang_id"])',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.remove_columns([<span class="hljs-string">&quot;path&quot;</span>, <span class="hljs-string">&quot;transcription&quot;</span>, <span class="hljs-string">&quot;english_transcription&quot;</span>, <span class="hljs-string">&quot;lang_id&quot;</span>])'}}),Te=new W({props:{code:'minds["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., -<span class="hljs-number">0.00048828</span>, -<span class="hljs-number">0.00024414</span>, -<span class="hljs-number">0.00024414</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">8000</span>}, <span class="hljs-string">&#x27;intent_class&#x27;</span>: <span class="hljs-number">2</span>}`}}),Pe=new W({props:{code:`labels = minds["train"].features["intent_class"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = minds[<span class="hljs-string">&quot;train&quot;</span>].features[<span 
class="hljs-string">&quot;intent_class&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... </span> id2label[<span class="hljs-built_in">str</span>(i)] = label`}}),De=new W({props:{code:"id2label[str(2)]",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">2</span>)] <span class="hljs-string">&#x27;app_error&#x27;</span>`}}),Se=new Xa({}),Ce=new W({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)`}}),Me=new W({props:{code:`minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) minds["train"][0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">2.2098757e-05</span>, <span class="hljs-number">4.6582241e-05</span>, -<span class="hljs-number">2.2803260e-05</span>, ..., -<span class="hljs-number">2.8419291e-04</span>, -<span class="hljs-number">2.3305941e-04</span>, -<span class="hljs-number">1.1425107e-04</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;intent_class&#x27;</span>: <span class="hljs-number">2</span>}`}}),We=new W({props:{code:`def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ) return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> audio_arrays = [x[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;audio&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor( <span class="hljs-meta">... 
</span> audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=<span class="hljs-number">16000</span>, truncation=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs`}}),Re=new W({props:{code:`encoded_minds = minds.map(preprocess_function, remove_columns="audio", batched=True) encoded_minds = encoded_minds.rename_column("intent_class", "label")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = minds.<span class="hljs-built_in">map</span>(preprocess_function, remove_columns=<span class="hljs-string">&quot;audio&quot;</span>, batched=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = encoded_minds.rename_column(<span class="hljs-string">&quot;intent_class&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>)`}}),Ve=new Xa({}),$e=new zt({props:{pytorch:!0,tensorflow:!1,jax:!1,$$slots:{pytorch:[Gt]},$$scope:{ctx:N}}}),be=new Xs({props:{$$slots:{default:[Jt]},$$scope:{ctx:N}}}),{c(){c=r("meta"),b=h(),m=r("h1"),g=r("a"),v=r("span"),w(_.$$.fragment),$=h(),q=r("span"),T=t("Audio classification"),P=h(),w(D.$$.fragment),B=h(),G=r("p"),M=t("Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds."),R=h(),L=r("p"),X=t("This guide will show you how to fine-tune "),J=r("a"),je=t("Wav2Vec2"),we=t(" on the "),I=r("a"),xe=t("MInDS-14"),Z=t(" to classify intent."),me=h(),w(V.$$.fragment),he=h(),S=r("h2"),z=r("a"),Y=r("span"),w(K.$$.fragment),Ee=h(),ee=r("span"),Q=t("Load MInDS-14 dataset"),re=h(),f=r("p"),j=t("Load the "),F=r("a"),Ye=t("MInDS-14"),ae=t(" from the \u{1F917} Datasets library:"),oe=h(),w(se.$$.fragment),ie=h(),pe=r("p"),ue=t("Split this dataset into a train and test set:"),ye=h(),w(ke.$$.fragment),va=h(),Ue=r("p"),Za=t("Then take a look at the dataset:"),ja=h(),w(Ae.$$.fragment),wa=h(),O=r("p"),es=t("While the dataset contains a lot of other useful information, like "),Ze=r("code"),as=t("lang_id"),ss=t(" and "),ea=r("code"),ts=t("english_transcription"),ns=t(", you will focus on the "),aa=r("code"),ls=t("audio"),rs=t(" and "),sa=r("code"),os=t("intent_class"),is=t(" in this guide. Remove the other columns:"),xa=h(),w(qe.$$.fragment),Ea=h(),He=r("p"),ps=t("Take a look at an example now:"),ya=h(),w(Te.$$.fragment),ka=h(),U=r("p"),cs=t("The "),ta=r("code"),fs=t("audio"),ms=t(" column contains a 1-dimensional "),na=r("code"),hs=t("array"),us=t(" of the speech signal that must be called to load and resample the audio file. The "),la=r("code"),ds=t("intent_class"),_s=t(" column is an integer that represents the class id of intent. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),Aa=h(),w(Pe.$$.fragment),qa=h(),Be=r("p"),gs=t("Now you can convert the label number to a label name for more information:"),Ta=h(),w(De.$$.fragment),Pa=h(),te=r("p"),$s=t("Each keyword - or label - corresponds to a number; "),ra=r("code"),bs=t("2"),vs=t(" indicates "),oa=r("code"),js=t("app_error"),ws=t(" in the example above."),Da=h(),ce=r("h2"),de=r("a"),ia=r("span"),w(Se.$$.fragment),xs=h(),pa=r("span"),Es=t("Preprocess"),Sa=h(),Ge=r("p"),ys=t("Load the Wav2Vec2 feature extractor to process the audio signal:"),Ca=h(),w(Ce.$$.fragment),Ia=h(),_e=r("p"),ks=t("The "),Ie=r("a"),As=t("MInDS-14"),qs=t(" dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:"),Ma=h(),w(Me.$$.fragment),La=h(),Je=r("p"),Ts=t("The preprocessing function needs to:"),Oa=h(),ne=r("ol"),Le=r("li"),Ps=t("Call the "),ca=r("code"),Ds=t("audio"),Ss=t(" column to load and if necessary resample the audio file."),Cs=h(),Oe=r("li"),Is=t("Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 "),Fe=r("a"),Ms=t("model card"),Ls=t("."),Os=h(),fa=r("li"),Fs=t("Set a maximum input length so longer inputs are batched without being truncated."),Fa=h(),w(We.$$.fragment),Wa=h(),C=r("p"),Ws=t("Use \u{1F917} Datasets "),Ne=r("a"),Ns=t("map"),Rs=t(" function to apply the preprocessing function over the entire dataset. You can speed up the "),ma=r("code"),Vs=t("map"),zs=t(" function by setting "),ha=r("code"),Ys=t("batched=True"),Us=t(" to process multiple elements of the dataset at once. Remove the columns you don\u2019t need, and rename "),ua=r("code"),Hs=t("intent_class"),Bs=t(" to "),da=r("code"),Gs=t("label"),Js=t(" because that is what the model expects:"),Na=h(),w(Re.$$.fragment),Ra=h(),fe=r("h2"),ge=r("a"),_a=r("span"),w(Ve.$$.fragment),Ks=h(),ga=r("span"),Qs=t("Train"),Va=h(),w($e.$$.fragment),za=h(),w(be.$$.fragment),this.h()},l(e){const l=Nt('[data-svelte="svelte-1phssyn"]',document.head);c=o(l,"META",{name:!0,content:!0}),l.forEach(s),b=u(e),m=o(e,"H1",{class:!0});var ze=i(m);g=o(ze,"A",{id:!0,class:!0,href:!0});var $a=i(g);v=o($a,"SPAN",{});var ba=i(v);x(_.$$.fragment,ba),ba.forEach(s),$a.forEach(s),$=u(ze),q=o(ze,"SPAN",{});var Zs=i(q);T=n(Zs,"Audio classification"),Zs.forEach(s),ze.forEach(s),P=u(e),x(D.$$.fragment,e),B=u(e),G=o(e,"P",{});var et=i(G);M=n(et,"Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. 
Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds."),et.forEach(s),R=u(e),L=o(e,"P",{});var Ke=i(L);X=n(Ke,"This guide will show you how to fine-tune "),J=o(Ke,"A",{href:!0,rel:!0});var at=i(J);je=n(at,"Wav2Vec2"),at.forEach(s),we=n(Ke," on the "),I=o(Ke,"A",{href:!0,rel:!0});var st=i(I);xe=n(st,"MInDS-14"),st.forEach(s),Z=n(Ke," to classify intent."),Ke.forEach(s),me=u(e),x(V.$$.fragment,e),he=u(e),S=o(e,"H2",{class:!0});var Ua=i(S);z=o(Ua,"A",{id:!0,class:!0,href:!0});var tt=i(z);Y=o(tt,"SPAN",{});var nt=i(Y);x(K.$$.fragment,nt),nt.forEach(s),tt.forEach(s),Ee=u(Ua),ee=o(Ua,"SPAN",{});var lt=i(ee);Q=n(lt,"Load MInDS-14 dataset"),lt.forEach(s),Ua.forEach(s),re=u(e),f=o(e,"P",{});var Ha=i(f);j=n(Ha,"Load the "),F=o(Ha,"A",{href:!0,rel:!0});var rt=i(F);Ye=n(rt,"MInDS-14"),rt.forEach(s),ae=n(Ha," from the \u{1F917} Datasets library:"),Ha.forEach(s),oe=u(e),x(se.$$.fragment,e),ie=u(e),pe=o(e,"P",{});var ot=i(pe);ue=n(ot,"Split this dataset into a train and test set:"),ot.forEach(s),ye=u(e),x(ke.$$.fragment,e),va=u(e),Ue=o(e,"P",{});var it=i(Ue);Za=n(it,"Then take a look at the dataset:"),it.forEach(s),ja=u(e),x(Ae.$$.fragment,e),wa=u(e),O=o(e,"P",{});var le=i(O);es=n(le,"While the dataset contains a lot of other useful information, like "),Ze=o(le,"CODE",{});var pt=i(Ze);as=n(pt,"lang_id"),pt.forEach(s),ss=n(le," and "),ea=o(le,"CODE",{});var ct=i(ea);ts=n(ct,"english_transcription"),ct.forEach(s),ns=n(le,", you will focus on the "),aa=o(le,"CODE",{});var ft=i(aa);ls=n(ft,"audio"),ft.forEach(s),rs=n(le," and "),sa=o(le,"CODE",{});var mt=i(sa);os=n(mt,"intent_class"),mt.forEach(s),is=n(le," in this guide. Remove the other columns:"),le.forEach(s),xa=u(e),x(qe.$$.fragment,e),Ea=u(e),He=o(e,"P",{});var ht=i(He);ps=n(ht,"Take a look at an example now:"),ht.forEach(s),ya=u(e),x(Te.$$.fragment,e),ka=u(e),U=o(e,"P",{});var ve=i(U);cs=n(ve,"The "),ta=o(ve,"CODE",{});var ut=i(ta);fs=n(ut,"audio"),ut.forEach(s),ms=n(ve," column contains a 1-dimensional "),na=o(ve,"CODE",{});var dt=i(na);hs=n(dt,"array"),dt.forEach(s),us=n(ve," of the speech signal that must be called to load and resample the audio file. The "),la=o(ve,"CODE",{});var _t=i(la);ds=n(_t,"intent_class"),_t.forEach(s),_s=n(ve," column is an integer that represents the class id of intent. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),ve.forEach(s),Aa=u(e),x(Pe.$$.fragment,e),qa=u(e),Be=o(e,"P",{});var gt=i(Be);gs=n(gt,"Now you can convert the label number to a label name for more information:"),gt.forEach(s),Ta=u(e),x(De.$$.fragment,e),Pa=u(e),te=o(e,"P",{});var Qe=i(te);$s=n(Qe,"Each keyword - or label - corresponds to a number; "),ra=o(Qe,"CODE",{});var $t=i(ra);bs=n($t,"2"),$t.forEach(s),vs=n(Qe," indicates "),oa=o(Qe,"CODE",{});var bt=i(oa);js=n(bt,"app_error"),bt.forEach(s),ws=n(Qe," in the example above."),Qe.forEach(s),Da=u(e),ce=o(e,"H2",{class:!0});var Ba=i(ce);de=o(Ba,"A",{id:!0,class:!0,href:!0});var vt=i(de);ia=o(vt,"SPAN",{});var jt=i(ia);x(Se.$$.fragment,jt),jt.forEach(s),vt.forEach(s),xs=u(Ba),pa=o(Ba,"SPAN",{});var wt=i(pa);Es=n(wt,"Preprocess"),wt.forEach(s),Ba.forEach(s),Sa=u(e),Ge=o(e,"P",{});var xt=i(Ge);ys=n(xt,"Load the Wav2Vec2 feature extractor to process the audio signal:"),xt.forEach(s),Ca=u(e),x(Ce.$$.fragment,e),Ia=u(e),_e=o(e,"P",{});var Ga=i(_e);ks=n(Ga,"The "),Ie=o(Ga,"A",{href:!0,rel:!0});var Et=i(Ie);As=n(Et,"MInDS-14"),Et.forEach(s),qs=n(Ga," dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:"),Ga.forEach(s),Ma=u(e),x(Me.$$.fragment,e),La=u(e),Je=o(e,"P",{});var yt=i(Je);Ts=n(yt,"The preprocessing function needs to:"),yt.forEach(s),Oa=u(e),ne=o(e,"OL",{});var Xe=i(ne);Le=o(Xe,"LI",{});var Ja=i(Le);Ps=n(Ja,"Call the "),ca=o(Ja,"CODE",{});var kt=i(ca);Ds=n(kt,"audio"),kt.forEach(s),Ss=n(Ja," column to load and if necessary resample the audio file."),Ja.forEach(s),Cs=u(Xe),Oe=o(Xe,"LI",{});var Ka=i(Oe);Is=n(Ka,"Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 "),Fe=o(Ka,"A",{href:!0,rel:!0});var At=i(Fe);Ms=n(At,"model card"),At.forEach(s),Ls=n(Ka,"."),Ka.forEach(s),Os=u(Xe),fa=o(Xe,"LI",{});var qt=i(fa);Fs=n(qt,"Set a maximum input length so longer inputs are batched without being truncated."),qt.forEach(s),Xe.forEach(s),Fa=u(e),x(We.$$.fragment,e),Wa=u(e),C=o(e,"P",{});var H=i(C);Ws=n(H,"Use \u{1F917} Datasets "),Ne=o(H,"A",{href:!0,rel:!0});var Tt=i(Ne);Ns=n(Tt,"map"),Tt.forEach(s),Rs=n(H," function to apply the preprocessing function over the entire dataset. You can speed up the "),ma=o(H,"CODE",{});var Pt=i(ma);Vs=n(Pt,"map"),Pt.forEach(s),zs=n(H," function by setting "),ha=o(H,"CODE",{});var Dt=i(ha);Ys=n(Dt,"batched=True"),Dt.forEach(s),Us=n(H," to process multiple elements of the dataset at once. 
Remove the columns you don\u2019t need, and rename "),ua=o(H,"CODE",{});var St=i(ua);Hs=n(St,"intent_class"),St.forEach(s),Bs=n(H," to "),da=o(H,"CODE",{});var Ct=i(da);Gs=n(Ct,"label"),Ct.forEach(s),Js=n(H," because that is what the model expects:"),H.forEach(s),Na=u(e),x(Re.$$.fragment,e),Ra=u(e),fe=o(e,"H2",{class:!0});var Qa=i(fe);ge=o(Qa,"A",{id:!0,class:!0,href:!0});var It=i(ge);_a=o(It,"SPAN",{});var Mt=i(_a);x(Ve.$$.fragment,Mt),Mt.forEach(s),It.forEach(s),Ks=u(Qa),ga=o(Qa,"SPAN",{});var Lt=i(ga);Qs=n(Lt,"Train"),Lt.forEach(s),Qa.forEach(s),Va=u(e),x($e.$$.fragment,e),za=u(e),x(be.$$.fragment,e),this.h()},h(){d(c,"name","hf:doc:metadata"),d(c,"content",JSON.stringify(Qt)),d(g,"id","audio-classification"),d(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(g,"href","#audio-classification"),d(m,"class","relative group"),d(J,"href","https://huggingface.co/facebook/wav2vec2-base"),d(J,"rel","nofollow"),d(I,"href","https://huggingface.co/datasets/PolyAI/minds14"),d(I,"rel","nofollow"),d(z,"id","load-minds14-dataset"),d(z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(z,"href","#load-minds14-dataset"),d(S,"class","relative group"),d(F,"href","https://huggingface.co/datasets/PolyAI/minds14"),d(F,"rel","nofollow"),d(de,"id","preprocess"),d(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(de,"href","#preprocess"),d(ce,"class","relative group"),d(Ie,"href","https://huggingface.co/datasets/PolyAI/minds14"),d(Ie,"rel","nofollow"),d(Fe,"href","https://huggingface.co/facebook/wav2vec2-base"),d(Fe,"rel","nofollow"),d(Ne,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),d(Ne,"rel","nofollow"),d(ge,"id","train"),d(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ge,"href","#train"),d(fe,"class","relative 
group")},m(e,l){a(document.head,c),p(e,b,l),p(e,m,l),a(m,g),a(g,v),E(_,v,null),a(m,$),a(m,q),a(q,T),p(e,P,l),E(D,e,l),p(e,B,l),p(e,G,l),a(G,M),p(e,R,l),p(e,L,l),a(L,X),a(L,J),a(J,je),a(L,we),a(L,I),a(I,xe),a(L,Z),p(e,me,l),E(V,e,l),p(e,he,l),p(e,S,l),a(S,z),a(z,Y),E(K,Y,null),a(S,Ee),a(S,ee),a(ee,Q),p(e,re,l),p(e,f,l),a(f,j),a(f,F),a(F,Ye),a(f,ae),p(e,oe,l),E(se,e,l),p(e,ie,l),p(e,pe,l),a(pe,ue),p(e,ye,l),E(ke,e,l),p(e,va,l),p(e,Ue,l),a(Ue,Za),p(e,ja,l),E(Ae,e,l),p(e,wa,l),p(e,O,l),a(O,es),a(O,Ze),a(Ze,as),a(O,ss),a(O,ea),a(ea,ts),a(O,ns),a(O,aa),a(aa,ls),a(O,rs),a(O,sa),a(sa,os),a(O,is),p(e,xa,l),E(qe,e,l),p(e,Ea,l),p(e,He,l),a(He,ps),p(e,ya,l),E(Te,e,l),p(e,ka,l),p(e,U,l),a(U,cs),a(U,ta),a(ta,fs),a(U,ms),a(U,na),a(na,hs),a(U,us),a(U,la),a(la,ds),a(U,_s),p(e,Aa,l),E(Pe,e,l),p(e,qa,l),p(e,Be,l),a(Be,gs),p(e,Ta,l),E(De,e,l),p(e,Pa,l),p(e,te,l),a(te,$s),a(te,ra),a(ra,bs),a(te,vs),a(te,oa),a(oa,js),a(te,ws),p(e,Da,l),p(e,ce,l),a(ce,de),a(de,ia),E(Se,ia,null),a(ce,xs),a(ce,pa),a(pa,Es),p(e,Sa,l),p(e,Ge,l),a(Ge,ys),p(e,Ca,l),E(Ce,e,l),p(e,Ia,l),p(e,_e,l),a(_e,ks),a(_e,Ie),a(Ie,As),a(_e,qs),p(e,Ma,l),E(Me,e,l),p(e,La,l),p(e,Je,l),a(Je,Ts),p(e,Oa,l),p(e,ne,l),a(ne,Le),a(Le,Ps),a(Le,ca),a(ca,Ds),a(Le,Ss),a(ne,Cs),a(ne,Oe),a(Oe,Is),a(Oe,Fe),a(Fe,Ms),a(Oe,Ls),a(ne,Os),a(ne,fa),a(fa,Fs),p(e,Fa,l),E(We,e,l),p(e,Wa,l),p(e,C,l),a(C,Ws),a(C,Ne),a(Ne,Ns),a(C,Rs),a(C,ma),a(ma,Vs),a(C,zs),a(C,ha),a(ha,Ys),a(C,Us),a(C,ua),a(ua,Hs),a(C,Bs),a(C,da),a(da,Gs),a(C,Js),p(e,Na,l),E(Re,e,l),p(e,Ra,l),p(e,fe,l),a(fe,ge),a(ge,_a),E(Ve,_a,null),a(fe,Ks),a(fe,ga),a(ga,Qs),p(e,Va,l),E($e,e,l),p(e,za,l),E(be,e,l),Ya=!0},p(e,[l]){const ze={};l&2&&(ze.$$scope={dirty:l,ctx:e}),V.$set(ze);const $a={};l&2&&($a.$$scope={dirty:l,ctx:e}),$e.$set($a);const ba={};l&2&&(ba.$$scope={dirty:l,ctx:e}),be.$set(ba)},i(e){Ya||(y(_.$$.fragment,e),y(D.$$.fragment,e),y(V.$$.fragment,e),y(K.$$.fragment,e),y(se.$$.fragment,e),y(ke.$$.fragment,e),y(Ae.$$.fragment,e),y(qe.$$.fragment,e),y(Te.$$.fragment,e),y(Pe.$$.fragment,e),y(De.$$.fragment,e),y(Se.$$.fragment,e),y(Ce.$$.fragment,e),y(Me.$$.fragment,e),y(We.$$.fragment,e),y(Re.$$.fragment,e),y(Ve.$$.fragment,e),y($e.$$.fragment,e),y(be.$$.fragment,e),Ya=!0)},o(e){k(_.$$.fragment,e),k(D.$$.fragment,e),k(V.$$.fragment,e),k(K.$$.fragment,e),k(se.$$.fragment,e),k(ke.$$.fragment,e),k(Ae.$$.fragment,e),k(qe.$$.fragment,e),k(Te.$$.fragment,e),k(Pe.$$.fragment,e),k(De.$$.fragment,e),k(Se.$$.fragment,e),k(Ce.$$.fragment,e),k(Me.$$.fragment,e),k(We.$$.fragment,e),k(Re.$$.fragment,e),k(Ve.$$.fragment,e),k($e.$$.fragment,e),k(be.$$.fragment,e),Ya=!1},d(e){s(c),e&&s(b),e&&s(m),A(_),e&&s(P),A(D,e),e&&s(B),e&&s(G),e&&s(R),e&&s(L),e&&s(me),A(V,e),e&&s(he),e&&s(S),A(K),e&&s(re),e&&s(f),e&&s(oe),A(se,e),e&&s(ie),e&&s(pe),e&&s(ye),A(ke,e),e&&s(va),e&&s(Ue),e&&s(ja),A(Ae,e),e&&s(wa),e&&s(O),e&&s(xa),A(qe,e),e&&s(Ea),e&&s(He),e&&s(ya),A(Te,e),e&&s(ka),e&&s(U),e&&s(Aa),A(Pe,e),e&&s(qa),e&&s(Be),e&&s(Ta),A(De,e),e&&s(Pa),e&&s(te),e&&s(Da),e&&s(ce),A(Se),e&&s(Sa),e&&s(Ge),e&&s(Ca),A(Ce,e),e&&s(Ia),e&&s(_e),e&&s(Ma),A(Me,e),e&&s(La),e&&s(Je),e&&s(Oa),e&&s(ne),e&&s(Fa),A(We,e),e&&s(Wa),e&&s(C),e&&s(Na),A(Re,e),e&&s(Ra),e&&s(fe),A(Ve),e&&s(Va),A($e,e),e&&s(za),A(be,e)}}}const Qt={local:"audio-classification",sections:[{local:"load-minds14-dataset",title:"Load MInDS-14 dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Audio classification"};function Xt(N){return Rt(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ln extends 
Ot{constructor(c){super();Ft(this,c,Xt,Kt,Wt,{})}}export{ln as default,Qt as metadata};
34
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/multiple_choice.mdx-hf-doc-builder.js
import{S as ht,i as ft,s as mt,e as p,k as _,w as E,t as l,M as dt,c as i,d as a,m as g,a as c,x,h as r,b as $,G as e,g as h,y as z,q as T,o as q,B as C,v as ut,L as it}from"../../chunks/vendor-hf-doc-builder.js";import{T as ct}from"../../chunks/Tip-hf-doc-builder.js";import{I as Fe}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as ns}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as pt,M as Oe}from"../../chunks/Markdown-hf-doc-builder.js";function _t(D){let n,d;return n=new ns({props:{code:`from dataclasses import dataclass from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy from typing import Optional, Union import torch @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = sum(flattened_features, []) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()} batch["labels"] = torch.tensor(labels, dtype=torch.int64) return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. <span class="hljs-meta">... </span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... 
</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... </span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... </span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> flattened_features = <span class="hljs-built_in">sum</span>(flattened_features, []) <span class="hljs-meta">... </span> batch = self.tokenizer.pad( <span class="hljs-meta">... </span> flattened_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> max_length=self.max_length, <span class="hljs-meta">... </span> pad_to_multiple_of=self.pad_to_multiple_of, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> batch = {k: v.view(batch_size, num_choices, -<span class="hljs-number">1</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor(labels, dtype=torch.int64) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p:it,i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function gt(D){let n,d;return n=new Oe({props:{$$slots:{default:[_t]},$$scope:{ctx:D}}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p(t,u){const w={};u&2&&(w.$$scope={dirty:u,ctx:t}),n.$set(w)},i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function jt(D){let n,d;return n=new ns({props:{code:`from dataclasses import dataclass from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy from typing import Optional, Union import tensorflow as tf @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = sum(flattened_features, []) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="tf", ) batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. <span class="hljs-meta">... </span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... 
</span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... </span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> flattened_features = <span class="hljs-built_in">sum</span>(flattened_features, []) <span class="hljs-meta">... </span> batch = self.tokenizer.pad( <span class="hljs-meta">... </span> flattened_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> max_length=self.max_length, <span class="hljs-meta">... </span> pad_to_multiple_of=self.pad_to_multiple_of, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> batch = {k: tf.reshape(v, (batch_size, num_choices, -<span class="hljs-number">1</span>)) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = tf.convert_to_tensor(labels, dtype=tf.int64) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p:it,i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function $t(D){let n,d;return n=new Oe({props:{$$slots:{default:[jt]},$$scope:{ctx:D}}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p(t,u){const w={};u&2&&(w.$$scope={dirty:u,ctx:t}),n.$set(w)},i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function wt(D){let n,d,t,u,w;return{c(){n=p("p"),d=l("If you aren\u2019t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial "),t=p("a"),u=l("here"),w=l("!"),this.h()},l(b){n=i(b,"P",{});var y=c(n);d=r(y,"If you aren\u2019t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial "),t=i(y,"A",{href:!0});var F=c(t);u=r(F,"here"),F.forEach(a),w=r(y,"!"),y.forEach(a),this.h()},h(){$(t,"href","../training#finetune-with-trainer")},m(b,y){h(b,n,y),e(n,d),e(n,t),e(t,u),e(n,w)},d(b){b&&a(n)}}}function kt(D){let n,d,t,u,w,b,y,F,U,G,L,K,es,v,V,J,B,ps,N,ms,I,X,as,is,Q,W,A,ls,M,rs,os,cs,R,O;return y=new ns({props:{code:`from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer model = AutoModelForMultipleChoice.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMultipleChoice, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),U=new ct({props:{$$slots:{default:[wt]},$$scope:{ctx:D}}}),R=new ns({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=5e-5, per_device_train_batch_size=16, 
per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_swag["train"], eval_dataset=tokenized_swag["validation"], tokenizer=tokenizer, data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">5e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){n=p("p"),d=l("Load BERT with "),t=p("a"),u=l("AutoModelForMultipleChoice"),w=l(":"),b=_(),E(y.$$.fragment),F=_(),E(U.$$.fragment),G=_(),L=p("p"),K=l("At this point, only three steps remain:"),es=_(),v=p("ol"),V=p("li"),J=l("Define your training hyperparameters in "),B=p("a"),ps=l("TrainingArguments"),N=l("."),ms=_(),I=p("li"),X=l("Pass the training arguments to "),as=p("a"),is=l("Trainer"),Q=l(" along with the model, dataset, tokenizer, and data collator."),W=_(),A=p("li"),ls=l("Call "),M=p("a"),rs=l("train()"),os=l(" to fine-tune your model."),cs=_(),E(R.$$.fragment),this.h()},l(f){n=i(f,"P",{});var k=c(n);d=r(k,"Load BERT with "),t=i(k,"A",{href:!0});var H=c(t);u=r(H,"AutoModelForMultipleChoice"),H.forEach(a),w=r(k,":"),k.forEach(a),b=g(f),x(y.$$.fragment,f),F=g(f),x(U.$$.fragment,f),G=g(f),L=i(f,"P",{});var Z=c(L);K=r(Z,"At this point, only three steps remain:"),Z.forEach(a),es=g(f),v=i(f,"OL",{});var S=c(v);V=i(S,"LI",{});var hs=c(V);J=r(hs,"Define your training hyperparameters in "),B=i(hs,"A",{href:!0});var ds=c(B);ps=r(ds,"TrainingArguments"),ds.forEach(a),N=r(hs,"."),hs.forEach(a),ms=g(S),I=i(S,"LI",{});var Y=c(I);X=r(Y,"Pass the training arguments to "),as=i(Y,"A",{href:!0});var ss=c(as);is=r(ss,"Trainer"),ss.forEach(a),Q=r(Y," along with the model, dataset, tokenizer, and data collator."),Y.forEach(a),W=g(S),A=i(S,"LI",{});var P=c(A);ls=r(P,"Call "),M=i(P,"A",{href:!0});var o=c(M);rs=r(o,"train()"),o.forEach(a),os=r(P," to fine-tune your 
model."),P.forEach(a),S.forEach(a),cs=g(f),x(R.$$.fragment,f),this.h()},h(){$(t,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForMultipleChoice"),$(B,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments"),$(as,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),$(M,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(f,k){h(f,n,k),e(n,d),e(n,t),e(t,u),e(n,w),h(f,b,k),z(y,f,k),h(f,F,k),z(U,f,k),h(f,G,k),h(f,L,k),e(L,K),h(f,es,k),h(f,v,k),e(v,V),e(V,J),e(V,B),e(B,ps),e(V,N),e(v,ms),e(v,I),e(I,X),e(I,as),e(as,is),e(I,Q),e(v,W),e(v,A),e(A,ls),e(A,M),e(M,rs),e(A,os),h(f,cs,k),z(R,f,k),O=!0},p(f,k){const H={};k&2&&(H.$$scope={dirty:k,ctx:f}),U.$set(H)},i(f){O||(T(y.$$.fragment,f),T(U.$$.fragment,f),T(R.$$.fragment,f),O=!0)},o(f){q(y.$$.fragment,f),q(U.$$.fragment,f),q(R.$$.fragment,f),O=!1},d(f){f&&a(n),f&&a(b),C(y,f),f&&a(F),C(U,f),f&&a(G),f&&a(L),f&&a(es),f&&a(v),f&&a(cs),C(R,f)}}}function bt(D){let n,d;return n=new Oe({props:{$$slots:{default:[kt]},$$scope:{ctx:D}}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p(t,u){const w={};u&2&&(w.$$scope={dirty:u,ctx:t}),n.$set(w)},i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function vt(D){let n,d,t,u,w;return{c(){n=p("p"),d=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),t=p("a"),u=l("here"),w=l("!"),this.h()},l(b){n=i(b,"P",{});var y=c(n);d=r(y,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),t=i(y,"A",{href:!0});var F=c(t);u=r(F,"here"),F.forEach(a),w=r(y,"!"),y.forEach(a),this.h()},h(){$(t,"href","training#finetune-with-keras")},m(b,y){h(b,n,y),e(n,d),e(n,t),e(t,u),e(n,w)},d(b){b&&a(n)}}}function yt(D){let n,d,t,u,w,b,y,F,U,G,L,K,es,v,V,J,B,ps,N,ms,I,X,as,is,Q,W,A,ls,M,rs,os,cs,R,O,f,k,H,Z,S,hs,ds,Y,ss,P;return G=new ns({props:{code:`data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) tf_train_set = model.prepare_tf_dataset( tokenized_swag["train"], shuffle=True, batch_size=batch_size, collate_fn=data_collator, ) tf_validation_set = model.prepare_tf_dataset( tokenized_swag["validation"], shuffle=False, batch_size=batch_size, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),K=new ct({props:{$$slots:{default:[vt]},$$scope:{ctx:D}}}),B=new ns({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 2 total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">5e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)`}}),Q=new ns({props:{code:`from transformers import TFAutoModelForMultipleChoice model = TFAutoModelForMultipleChoice.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),O=new ns({props:{code:"model.compile(optimizer=optimizer)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)'}}),ss=new ns({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">2</span>)'}}),{c(){n=p("p"),d=l("To fine-tune a model in TensorFlow, start by converting your datasets to the "),t=p("code"),u=l("tf.data.Dataset"),w=l(" format with "),b=p("a"),y=l("prepare_tf_dataset()"),F=l("."),U=_(),E(G.$$.fragment),L=_(),E(K.$$.fragment),es=_(),v=p("p"),V=l("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),J=_(),E(B.$$.fragment),ps=_(),N=p("p"),ms=l("Load BERT with "),I=p("a"),X=l("TFAutoModelForMultipleChoice"),as=l(":"),is=_(),E(Q.$$.fragment),W=_(),A=p("p"),ls=l("Configure the model for training with "),M=p("a"),rs=p("code"),os=l("compile"),cs=l(":"),R=_(),E(O.$$.fragment),f=_(),k=p("p"),H=l("Call "),Z=p("a"),S=p("code"),hs=l("fit"),ds=l(" to fine-tune the model:"),Y=_(),E(ss.$$.fragment),this.h()},l(o){n=i(o,"P",{});var j=c(n);d=r(j,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),t=i(j,"CODE",{});var ks=c(t);u=r(ks,"tf.data.Dataset"),ks.forEach(a),w=r(j," format with "),b=i(j,"A",{href:!0});var Us=c(b);y=r(Us,"prepare_tf_dataset()"),Us.forEach(a),F=r(j,"."),j.forEach(a),U=g(o),x(G.$$.fragment,o),L=g(o),x(K.$$.fragment,o),es=g(o),v=i(o,"P",{});var Es=c(v);V=r(Es,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Es.forEach(a),J=g(o),x(B.$$.fragment,o),ps=g(o),N=i(o,"P",{});var xs=c(N);ms=r(xs,"Load BERT with "),I=i(xs,"A",{href:!0});var 
Gs=c(I);X=r(Gs,"TFAutoModelForMultipleChoice"),Gs.forEach(a),as=r(xs,":"),xs.forEach(a),is=g(o),x(Q.$$.fragment,o),W=g(o),A=i(o,"P",{});var _s=c(A);ls=r(_s,"Configure the model for training with "),M=i(_s,"A",{href:!0,rel:!0});var Rs=c(M);rs=i(Rs,"CODE",{});var Hs=c(rs);os=r(Hs,"compile"),Hs.forEach(a),Rs.forEach(a),cs=r(_s,":"),_s.forEach(a),R=g(o),x(O.$$.fragment,o),f=g(o),k=i(o,"P",{});var gs=c(k);H=r(gs,"Call "),Z=i(gs,"A",{href:!0,rel:!0});var Ys=c(Z);S=i(Ys,"CODE",{});var Ks=c(S);hs=r(Ks,"fit"),Ks.forEach(a),Ys.forEach(a),ds=r(gs," to fine-tune the model:"),gs.forEach(a),Y=g(o),x(ss.$$.fragment,o),this.h()},h(){$(b,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),$(I,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForMultipleChoice"),$(M,"href","https://keras.io/api/models/model_training_apis/#compile-method"),$(M,"rel","nofollow"),$(Z,"href","https://keras.io/api/models/model_training_apis/#fit-method"),$(Z,"rel","nofollow")},m(o,j){h(o,n,j),e(n,d),e(n,t),e(t,u),e(n,w),e(n,b),e(b,y),e(n,F),h(o,U,j),z(G,o,j),h(o,L,j),z(K,o,j),h(o,es,j),h(o,v,j),e(v,V),h(o,J,j),z(B,o,j),h(o,ps,j),h(o,N,j),e(N,ms),e(N,I),e(I,X),e(N,as),h(o,is,j),z(Q,o,j),h(o,W,j),h(o,A,j),e(A,ls),e(A,M),e(M,rs),e(rs,os),e(A,cs),h(o,R,j),z(O,o,j),h(o,f,j),h(o,k,j),e(k,H),e(k,Z),e(Z,S),e(S,hs),e(k,ds),h(o,Y,j),z(ss,o,j),P=!0},p(o,j){const ks={};j&2&&(ks.$$scope={dirty:j,ctx:o}),K.$set(ks)},i(o){P||(T(G.$$.fragment,o),T(K.$$.fragment,o),T(B.$$.fragment,o),T(Q.$$.fragment,o),T(O.$$.fragment,o),T(ss.$$.fragment,o),P=!0)},o(o){q(G.$$.fragment,o),q(K.$$.fragment,o),q(B.$$.fragment,o),q(Q.$$.fragment,o),q(O.$$.fragment,o),q(ss.$$.fragment,o),P=!1},d(o){o&&a(n),o&&a(U),C(G,o),o&&a(L),C(K,o),o&&a(es),o&&a(v),o&&a(J),C(B,o),o&&a(ps),o&&a(N),o&&a(is),C(Q,o),o&&a(W),o&&a(A),o&&a(R),C(O,o),o&&a(f),o&&a(k),o&&a(Y),C(ss,o)}}}function Et(D){let n,d;return n=new Oe({props:{$$slots:{default:[yt]},$$scope:{ctx:D}}}),{c(){E(n.$$.fragment)},l(t){x(n.$$.fragment,t)},m(t,u){z(n,t,u),d=!0},p(t,u){const w={};u&2&&(w.$$scope={dirty:u,ctx:t}),n.$set(w)},i(t){d||(T(n.$$.fragment,t),d=!0)},o(t){q(n.$$.fragment,t),d=!1},d(t){C(n,t)}}}function xt(D){let n,d,t,u,w,b,y,F,U,G,L,K,es,v,V,J,B,ps,N,ms,I,X,as,is,Q,W,A,ls,M,rs,os,cs,R,O,f,k,H,Z,S,hs,ds,Y,ss,P,o,j,ks,Us,Es,xs,Gs,_s,Rs,Hs,gs,Ys,Ks,ge,bs,zs,se,Ds,Se,ee,Le,je,Js,Be,$e,Fs,we,Qs,Ne,ke,js,vs,Ie,ae,We,Ue,te,Ge,Re,He,Os,Ye,ne,Ke,Je,Qe,us,Ve,le,Xe,Ze,re,sa,ea,oe,aa,ta,be,Ss,ve,fs,na,Ls,la,ra,pe,oa,pa,ie,ia,ca,ye,Bs,Ee,ts,ha,Vs,fa,ma,ce,da,ua,he,_a,ga,fe,ja,$a,xe,Ns,me,wa,ka,ze,Ts,Te,ys,qs,de,Is,ba,ue,va,qe,Cs,Ce;return b=new Fe({}),M=new Fe({}),H=new ns({props:{code:`from datasets import load_dataset swag = load_dataset("swag", "regular")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>swag = load_dataset(<span class="hljs-string">&quot;swag&quot;</span>, <span class="hljs-string">&quot;regular&quot;</span>)`}}),Y=new ns({props:{code:'swag["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>swag[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;ending0&#x27;</span>: <span class="hljs-string">&#x27;passes by walking down the street playing their instruments.&#x27;</span>, <span class="hljs-string">&#x27;ending1&#x27;</span>: <span class="hljs-string">&#x27;has heard 
approaching them.&#x27;</span>, <span class="hljs-string">&#x27;ending2&#x27;</span>: <span class="hljs-string">&quot;arrives and they&#x27;re outside dancing and asleep.&quot;</span>, <span class="hljs-string">&#x27;ending3&#x27;</span>: <span class="hljs-string">&#x27;turns the lead singer watches the performance.&#x27;</span>, <span class="hljs-string">&#x27;fold-ind&#x27;</span>: <span class="hljs-string">&#x27;3416&#x27;</span>, <span class="hljs-string">&#x27;gold-source&#x27;</span>: <span class="hljs-string">&#x27;gold&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;sent1&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments.&#x27;</span>, <span class="hljs-string">&#x27;sent2&#x27;</span>: <span class="hljs-string">&#x27;A drum line&#x27;</span>, <span class="hljs-string">&#x27;startphrase&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments. A drum line&#x27;</span>, <span class="hljs-string">&#x27;video-id&#x27;</span>: <span class="hljs-string">&#x27;anetv_jkn6uvmqwh4&#x27;</span>}`}}),Ds=new Fe({}),Fs=new ns({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),Ss=new ns({props:{code:`ending_names = ["ending0", "ending1", "ending2", "ending3"] def preprocess_function(examples): first_sentences = [[context] * 4 for context in examples["sent1"]] question_headers = examples["sent2"] second_sentences = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ] first_sentences = sum(first_sentences, []) second_sentences = sum(second_sentences, []) tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>ending_names = [<span class="hljs-string">&quot;ending0&quot;</span>, <span class="hljs-string">&quot;ending1&quot;</span>, <span class="hljs-string">&quot;ending2&quot;</span>, <span class="hljs-string">&quot;ending3&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> first_sentences = [[context] * <span class="hljs-number">4</span> <span class="hljs-keyword">for</span> context <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;sent1&quot;</span>]] <span class="hljs-meta">... </span> question_headers = examples[<span class="hljs-string">&quot;sent2&quot;</span>] <span class="hljs-meta">... </span> second_sentences = [ <span class="hljs-meta">... 
</span> [<span class="hljs-string">f&quot;<span class="hljs-subst">{header}</span> <span class="hljs-subst">{examples[end][i]}</span>&quot;</span> <span class="hljs-keyword">for</span> end <span class="hljs-keyword">in</span> ending_names] <span class="hljs-keyword">for</span> i, header <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(question_headers) <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> first_sentences = <span class="hljs-built_in">sum</span>(first_sentences, []) <span class="hljs-meta">... </span> second_sentences = <span class="hljs-built_in">sum</span>(second_sentences, []) <span class="hljs-meta">... </span> tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> {k: [v[i : i + <span class="hljs-number">4</span>] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, <span class="hljs-built_in">len</span>(v), <span class="hljs-number">4</span>)] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> tokenized_examples.items()}`}}),Bs=new ns({props:{code:"tokenized_swag = swag.map(preprocess_function, batched=True)",highlighted:'tokenized_swag = swag.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),Ts=new pt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[$t],pytorch:[gt]},$$scope:{ctx:D}}}),Is=new Fe({}),Cs=new pt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Et],pytorch:[bt]},$$scope:{ctx:D}}}),{c(){n=p("meta"),d=_(),t=p("h1"),u=p("a"),w=p("span"),E(b.$$.fragment),y=_(),F=p("span"),U=l("Multiple choice"),G=_(),L=p("p"),K=l("A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. The model is trained to select the correct answer from multiple inputs given a context."),es=_(),v=p("p"),V=l("This guide will show you how to fine-tune "),J=p("a"),B=l("BERT"),ps=l(" on the "),N=p("code"),ms=l("regular"),I=l(" configuration of the "),X=p("a"),as=l("SWAG"),is=l(" dataset to select the best answer given multiple options and some context."),Q=_(),W=p("h2"),A=p("a"),ls=p("span"),E(M.$$.fragment),rs=_(),os=p("span"),cs=l("Load SWAG dataset"),R=_(),O=p("p"),f=l("Load the SWAG dataset from the \u{1F917} Datasets library:"),k=_(),E(H.$$.fragment),Z=_(),S=p("p"),hs=l("Then take a look at an example:"),ds=_(),E(Y.$$.fragment),ss=_(),P=p("p"),o=l("The "),j=p("code"),ks=l("sent1"),Us=l(" and "),Es=p("code"),xs=l("sent2"),Gs=l(" fields show how a sentence begins, and each "),_s=p("code"),Rs=l("ending"),Hs=l(" field shows how a sentence could end. 
Given the sentence beginning, the model must pick the correct sentence ending as indicated by the "),gs=p("code"),Ys=l("label"),Ks=l(" field."),ge=_(),bs=p("h2"),zs=p("a"),se=p("span"),E(Ds.$$.fragment),Se=_(),ee=p("span"),Le=l("Preprocess"),je=_(),Js=p("p"),Be=l("Load the BERT tokenizer to process the start of each sentence and the four possible endings:"),$e=_(),E(Fs.$$.fragment),we=_(),Qs=p("p"),Ne=l("The preprocessing function needs to do:"),ke=_(),js=p("ol"),vs=p("li"),Ie=l("Make four copies of the "),ae=p("code"),We=l("sent1"),Ue=l(" field so you can combine each of them with "),te=p("code"),Ge=l("sent2"),Re=l(" to recreate how a sentence starts."),He=_(),Os=p("li"),Ye=l("Combine "),ne=p("code"),Ke=l("sent2"),Je=l(" with each of the four possible sentence endings."),Qe=_(),us=p("li"),Ve=l("Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding "),le=p("code"),Xe=l("input_ids"),Ze=l(", "),re=p("code"),sa=l("attention_mask"),ea=l(", and "),oe=p("code"),aa=l("labels"),ta=l(" field."),be=_(),E(Ss.$$.fragment),ve=_(),fs=p("p"),na=l("Use \u{1F917} Datasets "),Ls=p("a"),la=l("map"),ra=l(" function to apply the preprocessing function over the entire dataset. You can speed up the "),pe=p("code"),oa=l("map"),pa=l(" function by setting "),ie=p("code"),ia=l("batched=True"),ca=l(" to process multiple elements of the dataset at once:"),ye=_(),E(Bs.$$.fragment),Ee=_(),ts=p("p"),ha=l("\u{1F917} Transformers doesn\u2019t have a data collator for multiple choice, so you will need to create one. You can adapt the "),Vs=p("a"),fa=l("DataCollatorWithPadding"),ma=l(" to create a batch of examples for multiple choice. It will also "),ce=p("em"),da=l("dynamically pad"),ua=l(" your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),he=p("code"),_a=l("tokenizer"),ga=l(" function by setting "),fe=p("code"),ja=l("padding=True"),$a=l(", dynamic padding is more efficient."),xe=_(),Ns=p("p"),me=p("code"),wa=l("DataCollatorForMultipleChoice"),ka=l(" will flatten all the model inputs, apply padding, and then unflatten the results:"),ze=_(),E(Ts.$$.fragment),Te=_(),ys=p("h2"),qs=p("a"),de=p("span"),E(Is.$$.fragment),ba=_(),ue=p("span"),va=l("Train"),qe=_(),E(Cs.$$.fragment),this.h()},l(s){const m=dt('[data-svelte="svelte-1phssyn"]',document.head);n=i(m,"META",{name:!0,content:!0}),m.forEach(a),d=g(s),t=i(s,"H1",{class:!0});var Ws=c(t);u=i(Ws,"A",{id:!0,class:!0,href:!0});var _e=c(u);w=i(_e,"SPAN",{});var Ea=c(w);x(b.$$.fragment,Ea),Ea.forEach(a),_e.forEach(a),y=g(Ws),F=i(Ws,"SPAN",{});var xa=c(F);U=r(xa,"Multiple choice"),xa.forEach(a),Ws.forEach(a),G=g(s),L=i(s,"P",{});var za=c(L);K=r(za,"A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. 
The model is trained to select the correct answer from multiple inputs given a context."),za.forEach(a),es=g(s),v=i(s,"P",{});var As=c(v);V=r(As,"This guide will show you how to fine-tune "),J=i(As,"A",{href:!0,rel:!0});var Ta=c(J);B=r(Ta,"BERT"),Ta.forEach(a),ps=r(As," on the "),N=i(As,"CODE",{});var qa=c(N);ms=r(qa,"regular"),qa.forEach(a),I=r(As," configuration of the "),X=i(As,"A",{href:!0,rel:!0});var Ca=c(X);as=r(Ca,"SWAG"),Ca.forEach(a),is=r(As," dataset to select the best answer given multiple options and some context."),As.forEach(a),Q=g(s),W=i(s,"H2",{class:!0});var Ae=c(W);A=i(Ae,"A",{id:!0,class:!0,href:!0});var Aa=c(A);ls=i(Aa,"SPAN",{});var Pa=c(ls);x(M.$$.fragment,Pa),Pa.forEach(a),Aa.forEach(a),rs=g(Ae),os=i(Ae,"SPAN",{});var Ma=c(os);cs=r(Ma,"Load SWAG dataset"),Ma.forEach(a),Ae.forEach(a),R=g(s),O=i(s,"P",{});var Da=c(O);f=r(Da,"Load the SWAG dataset from the \u{1F917} Datasets library:"),Da.forEach(a),k=g(s),x(H.$$.fragment,s),Z=g(s),S=i(s,"P",{});var Fa=c(S);hs=r(Fa,"Then take a look at an example:"),Fa.forEach(a),ds=g(s),x(Y.$$.fragment,s),ss=g(s),P=i(s,"P",{});var $s=c(P);o=r($s,"The "),j=i($s,"CODE",{});var Oa=c(j);ks=r(Oa,"sent1"),Oa.forEach(a),Us=r($s," and "),Es=i($s,"CODE",{});var Sa=c(Es);xs=r(Sa,"sent2"),Sa.forEach(a),Gs=r($s," fields show how a sentence begins, and each "),_s=i($s,"CODE",{});var La=c(_s);Rs=r(La,"ending"),La.forEach(a),Hs=r($s," field shows how a sentence could end. Given the sentence beginning, the model must pick the correct sentence ending as indicated by the "),gs=i($s,"CODE",{});var Ba=c(gs);Ys=r(Ba,"label"),Ba.forEach(a),Ks=r($s," field."),$s.forEach(a),ge=g(s),bs=i(s,"H2",{class:!0});var Pe=c(bs);zs=i(Pe,"A",{id:!0,class:!0,href:!0});var Na=c(zs);se=i(Na,"SPAN",{});var Ia=c(se);x(Ds.$$.fragment,Ia),Ia.forEach(a),Na.forEach(a),Se=g(Pe),ee=i(Pe,"SPAN",{});var Wa=c(ee);Le=r(Wa,"Preprocess"),Wa.forEach(a),Pe.forEach(a),je=g(s),Js=i(s,"P",{});var Ua=c(Js);Be=r(Ua,"Load the BERT tokenizer to process the start of each sentence and the four possible endings:"),Ua.forEach(a),$e=g(s),x(Fs.$$.fragment,s),we=g(s),Qs=i(s,"P",{});var Ga=c(Qs);Ne=r(Ga,"The preprocessing function needs to do:"),Ga.forEach(a),ke=g(s),js=i(s,"OL",{});var Xs=c(js);vs=i(Xs,"LI",{});var Zs=c(vs);Ie=r(Zs,"Make four copies of the "),ae=i(Zs,"CODE",{});var Ra=c(ae);We=r(Ra,"sent1"),Ra.forEach(a),Ue=r(Zs," field so you can combine each of them with "),te=i(Zs,"CODE",{});var Ha=c(te);Ge=r(Ha,"sent2"),Ha.forEach(a),Re=r(Zs," to recreate how a sentence starts."),Zs.forEach(a),He=g(Xs),Os=i(Xs,"LI",{});var Me=c(Os);Ye=r(Me,"Combine "),ne=i(Me,"CODE",{});var Ya=c(ne);Ke=r(Ya,"sent2"),Ya.forEach(a),Je=r(Me," with each of the four possible sentence endings."),Me.forEach(a),Qe=g(Xs),us=i(Xs,"LI",{});var Ps=c(us);Ve=r(Ps,"Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding "),le=i(Ps,"CODE",{});var Ka=c(le);Xe=r(Ka,"input_ids"),Ka.forEach(a),Ze=r(Ps,", "),re=i(Ps,"CODE",{});var Ja=c(re);sa=r(Ja,"attention_mask"),Ja.forEach(a),ea=r(Ps,", and "),oe=i(Ps,"CODE",{});var Qa=c(oe);aa=r(Qa,"labels"),Qa.forEach(a),ta=r(Ps," field."),Ps.forEach(a),Xs.forEach(a),be=g(s),x(Ss.$$.fragment,s),ve=g(s),fs=i(s,"P",{});var Ms=c(fs);na=r(Ms,"Use \u{1F917} Datasets "),Ls=i(Ms,"A",{href:!0,rel:!0});var Va=c(Ls);la=r(Va,"map"),Va.forEach(a),ra=r(Ms," function to apply the preprocessing function over the entire dataset. 
You can speed up the "),pe=i(Ms,"CODE",{});var Xa=c(pe);oa=r(Xa,"map"),Xa.forEach(a),pa=r(Ms," function by setting "),ie=i(Ms,"CODE",{});var Za=c(ie);ia=r(Za,"batched=True"),Za.forEach(a),ca=r(Ms," to process multiple elements of the dataset at once:"),Ms.forEach(a),ye=g(s),x(Bs.$$.fragment,s),Ee=g(s),ts=i(s,"P",{});var ws=c(ts);ha=r(ws,"\u{1F917} Transformers doesn\u2019t have a data collator for multiple choice, so you will need to create one. You can adapt the "),Vs=i(ws,"A",{href:!0});var st=c(Vs);fa=r(st,"DataCollatorWithPadding"),st.forEach(a),ma=r(ws," to create a batch of examples for multiple choice. It will also "),ce=i(ws,"EM",{});var et=c(ce);da=r(et,"dynamically pad"),et.forEach(a),ua=r(ws," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),he=i(ws,"CODE",{});var at=c(he);_a=r(at,"tokenizer"),at.forEach(a),ga=r(ws," function by setting "),fe=i(ws,"CODE",{});var tt=c(fe);ja=r(tt,"padding=True"),tt.forEach(a),$a=r(ws,", dynamic padding is more efficient."),ws.forEach(a),xe=g(s),Ns=i(s,"P",{});var ya=c(Ns);me=i(ya,"CODE",{});var nt=c(me);wa=r(nt,"DataCollatorForMultipleChoice"),nt.forEach(a),ka=r(ya," will flatten all the model inputs, apply padding, and then unflatten the results:"),ya.forEach(a),ze=g(s),x(Ts.$$.fragment,s),Te=g(s),ys=i(s,"H2",{class:!0});var De=c(ys);qs=i(De,"A",{id:!0,class:!0,href:!0});var lt=c(qs);de=i(lt,"SPAN",{});var rt=c(de);x(Is.$$.fragment,rt),rt.forEach(a),lt.forEach(a),ba=g(De),ue=i(De,"SPAN",{});var ot=c(ue);va=r(ot,"Train"),ot.forEach(a),De.forEach(a),qe=g(s),x(Cs.$$.fragment,s),this.h()},h(){$(n,"name","hf:doc:metadata"),$(n,"content",JSON.stringify(zt)),$(u,"id","multiple-choice"),$(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),$(u,"href","#multiple-choice"),$(t,"class","relative group"),$(J,"href","https://huggingface.co/bert-base-uncased"),$(J,"rel","nofollow"),$(X,"href","https://huggingface.co/datasets/swag"),$(X,"rel","nofollow"),$(A,"id","load-swag-dataset"),$(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),$(A,"href","#load-swag-dataset"),$(W,"class","relative group"),$(zs,"id","preprocess"),$(zs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),$(zs,"href","#preprocess"),$(bs,"class","relative group"),$(Ls,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),$(Ls,"rel","nofollow"),$(Vs,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),$(qs,"id","train"),$(qs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),$(qs,"href","#train"),$(ys,"class","relative 
group")},m(s,m){e(document.head,n),h(s,d,m),h(s,t,m),e(t,u),e(u,w),z(b,w,null),e(t,y),e(t,F),e(F,U),h(s,G,m),h(s,L,m),e(L,K),h(s,es,m),h(s,v,m),e(v,V),e(v,J),e(J,B),e(v,ps),e(v,N),e(N,ms),e(v,I),e(v,X),e(X,as),e(v,is),h(s,Q,m),h(s,W,m),e(W,A),e(A,ls),z(M,ls,null),e(W,rs),e(W,os),e(os,cs),h(s,R,m),h(s,O,m),e(O,f),h(s,k,m),z(H,s,m),h(s,Z,m),h(s,S,m),e(S,hs),h(s,ds,m),z(Y,s,m),h(s,ss,m),h(s,P,m),e(P,o),e(P,j),e(j,ks),e(P,Us),e(P,Es),e(Es,xs),e(P,Gs),e(P,_s),e(_s,Rs),e(P,Hs),e(P,gs),e(gs,Ys),e(P,Ks),h(s,ge,m),h(s,bs,m),e(bs,zs),e(zs,se),z(Ds,se,null),e(bs,Se),e(bs,ee),e(ee,Le),h(s,je,m),h(s,Js,m),e(Js,Be),h(s,$e,m),z(Fs,s,m),h(s,we,m),h(s,Qs,m),e(Qs,Ne),h(s,ke,m),h(s,js,m),e(js,vs),e(vs,Ie),e(vs,ae),e(ae,We),e(vs,Ue),e(vs,te),e(te,Ge),e(vs,Re),e(js,He),e(js,Os),e(Os,Ye),e(Os,ne),e(ne,Ke),e(Os,Je),e(js,Qe),e(js,us),e(us,Ve),e(us,le),e(le,Xe),e(us,Ze),e(us,re),e(re,sa),e(us,ea),e(us,oe),e(oe,aa),e(us,ta),h(s,be,m),z(Ss,s,m),h(s,ve,m),h(s,fs,m),e(fs,na),e(fs,Ls),e(Ls,la),e(fs,ra),e(fs,pe),e(pe,oa),e(fs,pa),e(fs,ie),e(ie,ia),e(fs,ca),h(s,ye,m),z(Bs,s,m),h(s,Ee,m),h(s,ts,m),e(ts,ha),e(ts,Vs),e(Vs,fa),e(ts,ma),e(ts,ce),e(ce,da),e(ts,ua),e(ts,he),e(he,_a),e(ts,ga),e(ts,fe),e(fe,ja),e(ts,$a),h(s,xe,m),h(s,Ns,m),e(Ns,me),e(me,wa),e(Ns,ka),h(s,ze,m),z(Ts,s,m),h(s,Te,m),h(s,ys,m),e(ys,qs),e(qs,de),z(Is,de,null),e(ys,ba),e(ys,ue),e(ue,va),h(s,qe,m),z(Cs,s,m),Ce=!0},p(s,[m]){const Ws={};m&2&&(Ws.$$scope={dirty:m,ctx:s}),Ts.$set(Ws);const _e={};m&2&&(_e.$$scope={dirty:m,ctx:s}),Cs.$set(_e)},i(s){Ce||(T(b.$$.fragment,s),T(M.$$.fragment,s),T(H.$$.fragment,s),T(Y.$$.fragment,s),T(Ds.$$.fragment,s),T(Fs.$$.fragment,s),T(Ss.$$.fragment,s),T(Bs.$$.fragment,s),T(Ts.$$.fragment,s),T(Is.$$.fragment,s),T(Cs.$$.fragment,s),Ce=!0)},o(s){q(b.$$.fragment,s),q(M.$$.fragment,s),q(H.$$.fragment,s),q(Y.$$.fragment,s),q(Ds.$$.fragment,s),q(Fs.$$.fragment,s),q(Ss.$$.fragment,s),q(Bs.$$.fragment,s),q(Ts.$$.fragment,s),q(Is.$$.fragment,s),q(Cs.$$.fragment,s),Ce=!1},d(s){a(n),s&&a(d),s&&a(t),C(b),s&&a(G),s&&a(L),s&&a(es),s&&a(v),s&&a(Q),s&&a(W),C(M),s&&a(R),s&&a(O),s&&a(k),C(H,s),s&&a(Z),s&&a(S),s&&a(ds),C(Y,s),s&&a(ss),s&&a(P),s&&a(ge),s&&a(bs),C(Ds),s&&a(je),s&&a(Js),s&&a($e),C(Fs,s),s&&a(we),s&&a(Qs),s&&a(ke),s&&a(js),s&&a(be),C(Ss,s),s&&a(ve),s&&a(fs),s&&a(ye),C(Bs,s),s&&a(Ee),s&&a(ts),s&&a(xe),s&&a(Ns),s&&a(ze),C(Ts,s),s&&a(Te),s&&a(ys),C(Is),s&&a(qe),C(Cs,s)}}}const zt={local:"multiple-choice",sections:[{local:"load-swag-dataset",title:"Load SWAG dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Multiple choice"};function Tt(D){return ut(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Dt extends ht{constructor(n){super();ft(this,n,Tt,xt,mt,{})}}export{Dt as default,zt as metadata};
35
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/tasks/translation.mdx-hf-doc-builder.js
import{S as Ia,i as Ua,s as Na,e as p,k as w,w as y,t as n,M as Ba,c as f,d as s,m as v,a as m,x as E,h as o,b as j,G as r,g as u,y as T,q as x,o as z,B as A,v as Wa,L as At}from"../../chunks/vendor-hf-doc-builder.js";import{T as zt}from"../../chunks/Tip-hf-doc-builder.js";import{Y as Oa}from"../../chunks/Youtube-hf-doc-builder.js";import{I as xt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as K}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as fa,M as We}from"../../chunks/Markdown-hf-doc-builder.js";function Ya(F){let a,l,t,i,_;return{c(){a=p("p"),l=n("See the translation "),t=p("a"),i=n("task page"),_=n(" for more information about its associated models, datasets, and metrics."),this.h()},l($){a=f($,"P",{});var g=m(a);l=o(g,"See the translation "),t=f(g,"A",{href:!0,rel:!0});var q=m(t);i=o(q,"task page"),q.forEach(s),_=o(g," for more information about its associated models, datasets, and metrics."),g.forEach(s),this.h()},h(){j(t,"href","https://huggingface.co/tasks/translation"),j(t,"rel","nofollow")},m($,g){u($,a,g),r(a,l),r(a,t),r(t,i),r(a,_)},d($){$&&s(a)}}}function Ha(F){let a,l,t,i,_,$,g,q;return g=new K({props:{code:`from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),{c(){a=p("p"),l=n("Load T5 with "),t=p("a"),i=n("AutoModelForSeq2SeqLM"),_=n(":"),$=w(),y(g.$$.fragment),this.h()},l(d){a=f(d,"P",{});var k=m(a);l=o(k,"Load T5 with "),t=f(k,"A",{href:!0});var L=m(t);i=o(L,"AutoModelForSeq2SeqLM"),L.forEach(s),_=o(k,":"),k.forEach(s),$=v(d),E(g.$$.fragment,d),this.h()},h(){j(t,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM")},m(d,k){u(d,a,k),r(a,l),r(a,t),r(t,i),r(a,_),u(d,$,k),T(g,d,k),q=!0},p:At,i(d){q||(x(g.$$.fragment,d),q=!0)},o(d){z(g.$$.fragment,d),q=!1},d(d){d&&s(a),d&&s($),A(g,d)}}}function Za(F){let a,l;return a=new We({props:{$$slots:{default:[Ha]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Ja(F){let a,l,t,i,_,$,g,q;return g=new K({props:{code:`from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),{c(){a=p("p"),l=n("Load T5 with "),t=p("a"),i=n("TFAutoModelForSeq2SeqLM"),_=n(":"),$=w(),y(g.$$.fragment),this.h()},l(d){a=f(d,"P",{});var k=m(a);l=o(k,"Load T5 with "),t=f(k,"A",{href:!0});var L=m(t);i=o(L,"TFAutoModelForSeq2SeqLM"),L.forEach(s),_=o(k,":"),k.forEach(s),$=v(d),E(g.$$.fragment,d),this.h()},h(){j(t,"href","/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM")},m(d,k){u(d,a,k),r(a,l),r(a,t),r(t,i),r(a,_),u(d,$,k),T(g,d,k),q=!0},p:At,i(d){q||(x(g.$$.fragment,d),q=!0)},o(d){z(g.$$.fragment,d),q=!1},d(d){d&&s(a),d&&s($),A(g,d)}}}function 
Ka(F){let a,l;return a=new We({props:{$$slots:{default:[Ja]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Ra(F){let a,l;return a=new K({props:{code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p:At,i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Ga(F){let a,l;return a=new We({props:{$$slots:{default:[Ra]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Xa(F){let a,l;return a=new K({props:{code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p:At,i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Qa(F){let a,l;return a=new We({props:{$$slots:{default:[Xa]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function Va(F){let a,l,t,i,_,$,g,q;return{c(){a=p("p"),l=n("If you aren\u2019t familiar with fine-tuning a model with the "),t=p("a"),i=n("Trainer"),_=n(", take a look at the basic tutorial "),$=p("a"),g=n("here"),q=n("!"),this.h()},l(d){a=f(d,"P",{});var k=m(a);l=o(k,"If you aren\u2019t familiar with fine-tuning a model with the "),t=f(k,"A",{href:!0});var L=m(t);i=o(L,"Trainer"),L.forEach(s),_=o(k,", take a look at the basic tutorial "),$=f(k,"A",{href:!0});var M=m($);g=o(M,"here"),M.forEach(s),q=o(k,"!"),k.forEach(s),this.h()},h(){j(t,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),j($,"href","../training#finetune-with-trainer")},m(d,k){u(d,a,k),r(a,l),r(a,t),r(t,i),r(a,_),r(a,$),r($,g),r(a,q)},d(d){d&&s(a)}}}function es(F){let a,l,t,i,_,$,g,q,d,k,L,M,U,R,Z,N,J,H,O,se,D,V,re,ee,C,W;return a=new zt({props:{$$slots:{default:[Va]},$$scope:{ctx:F}}}),C=new K({props:{code:`from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer training_args = Seq2SeqTrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, fp16=True, ) trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=tokenized_books["train"], 
eval_dataset=tokenized_books["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_books[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_books[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){y(a.$$.fragment),l=w(),t=p("p"),i=n("At this point, only three steps remain:"),_=w(),$=p("ol"),g=p("li"),q=n("Define your training hyperparameters in "),d=p("a"),k=n("Seq2SeqTrainingArguments"),L=n("."),M=w(),U=p("li"),R=n("Pass the training arguments to "),Z=p("a"),N=n("Seq2SeqTrainer"),J=n(" along with the model, dataset, tokenizer, and data collator."),H=w(),O=p("li"),se=n("Call "),D=p("a"),V=n("train()"),re=n(" to fine-tune your model."),ee=w(),y(C.$$.fragment),this.h()},l(b){E(a.$$.fragment,b),l=v(b),t=f(b,"P",{});var P=m(t);i=o(P,"At this point, only three steps remain:"),P.forEach(s),_=v(b),$=f(b,"OL",{});var B=m($);g=f(B,"LI",{});var I=m(g);q=o(I,"Define your training hyperparameters in "),d=f(I,"A",{href:!0});var te=m(d);k=o(te,"Seq2SeqTrainingArguments"),te.forEach(s),L=o(I,"."),I.forEach(s),M=v(B),U=f(B,"LI",{});var G=m(U);R=o(G,"Pass the training arguments to "),Z=f(G,"A",{href:!0});var oe=m(Z);N=o(oe,"Seq2SeqTrainer"),oe.forEach(s),J=o(G," along with the model, dataset, tokenizer, and data collator."),G.forEach(s),H=v(B),O=f(B,"LI",{});var X=m(O);se=o(X,"Call "),D=f(X,"A",{href:!0});var Y=m(D);V=o(Y,"train()"),Y.forEach(s),re=o(X," to fine-tune your model."),X.forEach(s),B.forEach(s),ee=v(b),E(C.$$.fragment,b),this.h()},h(){j(d,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments"),j(Z,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainer"),j(D,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train")},m(b,P){T(a,b,P),u(b,l,P),u(b,t,P),r(t,i),u(b,_,P),u(b,$,P),r($,g),r(g,q),r(g,d),r(d,k),r(g,L),r($,M),r($,U),r(U,R),r(U,Z),r(Z,N),r(U,J),r($,H),r($,O),r(O,se),r(O,D),r(D,V),r(O,re),u(b,ee,P),T(C,b,P),W=!0},p(b,P){const B={};P&2&&(B.$$scope={dirty:P,ctx:b}),a.$set(B)},i(b){W||(x(a.$$.fragment,b),x(C.$$.fragment,b),W=!0)},o(b){z(a.$$.fragment,b),z(C.$$.fragment,b),W=!1},d(b){A(a,b),b&&s(l),b&&s(t),b&&s(_),b&&s($),b&&s(ee),A(C,b)}}}function ts(F){let a,l;return a=new We({props:{$$slots:{default:[es]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function as(F){let a,l,t,i,_;return{c(){a=p("p"),l=n("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),t=p("a"),i=n("here"),_=n("!"),this.h()},l($){a=f($,"P",{});var g=m(a);l=o(g,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),t=f(g,"A",{href:!0});var q=m(t);i=o(q,"here"),q.forEach(s),_=o(g,"!"),g.forEach(s),this.h()},h(){j(t,"href","training#finetune-with-keras")},m($,g){u($,a,g),r(a,l),r(a,t),r(t,i),r(a,_)},d($){$&&s(a)}}}function ss(F){let a,l,t,i,_,$,g,q,d,k,L,M,U,R,Z,N,J,H,O,se,D,V,re,ee,C,W,b,P,B,I,te,G,oe,X,Y,$e;return k=new K({props:{code:`tf_train_set = model.prepare_tf_dataset( tokenized_books["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = model.prepare_tf_dataset( tokenized_books["test"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_books[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... 
</span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_books[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),M=new zt({props:{$$slots:{default:[as]},$$scope:{ctx:F}}}),J=new K({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),W=new K({props:{code:"model.compile(optimizer=optimizer)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)'}}),Y=new K({props:{code:"model.fit(tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){a=p("p"),l=n("To fine-tune a model in TensorFlow, start by converting your datasets to the "),t=p("code"),i=n("tf.data.Dataset"),_=n(" format with "),$=p("a"),g=n("prepare_tf_dataset()"),q=n("."),d=w(),y(k.$$.fragment),L=w(),y(M.$$.fragment),U=w(),R=p("p"),Z=n("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),N=w(),y(J.$$.fragment),H=w(),O=p("p"),se=n("Configure the model for training with "),D=p("a"),V=p("code"),re=n("compile"),ee=n(":"),C=w(),y(W.$$.fragment),b=w(),P=p("p"),B=n("Call "),I=p("a"),te=p("code"),G=n("fit"),oe=n(" to fine-tune the model:"),X=w(),y(Y.$$.fragment),this.h()},l(h){a=f(h,"P",{});var S=m(a);l=o(S,"To fine-tune a model in TensorFlow, start by converting your datasets to the "),t=f(S,"CODE",{});var le=m(t);i=o(le,"tf.data.Dataset"),le.forEach(s),_=o(S," format with "),$=f(S,"A",{href:!0});var me=m($);g=o(me,"prepare_tf_dataset()"),me.forEach(s),q=o(S,"."),S.forEach(s),d=v(h),E(k.$$.fragment,h),L=v(h),E(M.$$.fragment,h),U=v(h),R=f(h,"P",{});var De=m(R);Z=o(De,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),De.forEach(s),N=v(h),E(J.$$.fragment,h),H=v(h),O=f(h,"P",{});var he=m(O);se=o(he,"Configure the model for training with "),D=f(he,"A",{href:!0,rel:!0});var ie=m(D);V=f(ie,"CODE",{});var qe=m(V);re=o(qe,"compile"),qe.forEach(s),ie.forEach(s),ee=o(he,":"),he.forEach(s),C=v(h),E(W.$$.fragment,h),b=v(h),P=f(h,"P",{});var ne=m(P);B=o(ne,"Call "),I=f(ne,"A",{href:!0,rel:!0});var Ce=m(I);te=f(Ce,"CODE",{});var Se=m(te);G=o(Se,"fit"),Se.forEach(s),Ce.forEach(s),oe=o(ne," to fine-tune the 
model:"),ne.forEach(s),X=v(h),E(Y.$$.fragment,h),this.h()},h(){j($,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset"),j(D,"href","https://keras.io/api/models/model_training_apis/#compile-method"),j(D,"rel","nofollow"),j(I,"href","https://keras.io/api/models/model_training_apis/#fit-method"),j(I,"rel","nofollow")},m(h,S){u(h,a,S),r(a,l),r(a,t),r(t,i),r(a,_),r(a,$),r($,g),r(a,q),u(h,d,S),T(k,h,S),u(h,L,S),T(M,h,S),u(h,U,S),u(h,R,S),r(R,Z),u(h,N,S),T(J,h,S),u(h,H,S),u(h,O,S),r(O,se),r(O,D),r(D,V),r(V,re),r(O,ee),u(h,C,S),T(W,h,S),u(h,b,S),u(h,P,S),r(P,B),r(P,I),r(I,te),r(te,G),r(P,oe),u(h,X,S),T(Y,h,S),$e=!0},p(h,S){const le={};S&2&&(le.$$scope={dirty:S,ctx:h}),M.$set(le)},i(h){$e||(x(k.$$.fragment,h),x(M.$$.fragment,h),x(J.$$.fragment,h),x(W.$$.fragment,h),x(Y.$$.fragment,h),$e=!0)},o(h){z(k.$$.fragment,h),z(M.$$.fragment,h),z(J.$$.fragment,h),z(W.$$.fragment,h),z(Y.$$.fragment,h),$e=!1},d(h){h&&s(a),h&&s(d),A(k,h),h&&s(L),A(M,h),h&&s(U),h&&s(R),h&&s(N),A(J,h),h&&s(H),h&&s(O),h&&s(C),A(W,h),h&&s(b),h&&s(P),h&&s(X),A(Y,h)}}}function rs(F){let a,l;return a=new We({props:{$$slots:{default:[ss]},$$scope:{ctx:F}}}),{c(){y(a.$$.fragment)},l(t){E(a.$$.fragment,t)},m(t,i){T(a,t,i),l=!0},p(t,i){const _={};i&2&&(_.$$scope={dirty:i,ctx:t}),a.$set(_)},i(t){l||(x(a.$$.fragment,t),l=!0)},o(t){z(a.$$.fragment,t),l=!1},d(t){A(a,t)}}}function ns(F){let a,l,t,i,_,$,g,q;return{c(){a=p("p"),l=n(`For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding `),t=p("a"),i=n("PyTorch notebook"),_=n(` or `),$=p("a"),g=n("TensorFlow notebook"),q=n("."),this.h()},l(d){a=f(d,"P",{});var k=m(a);l=o(k,`For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding `),t=f(k,"A",{href:!0,rel:!0});var L=m(t);i=o(L,"PyTorch notebook"),L.forEach(s),_=o(k,` or `),$=f(k,"A",{href:!0,rel:!0});var M=m($);g=o(M,"TensorFlow notebook"),M.forEach(s),q=o(k,"."),k.forEach(s),this.h()},h(){j(t,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb"),j(t,"rel","nofollow"),j($,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb"),j($,"rel","nofollow")},m(d,k){u(d,a,k),r(a,l),r(a,t),r(t,i),r(a,_),r(a,$),r($,g),r(a,q)},d(d){d&&s(a)}}}function os(F){let a,l,t,i,_,$,g,q,d,k,L,M,U,R,Z,N,J,H,O,se,D,V,re,ee,C,W,b,P,B,I,te,G,oe,X,Y,$e,h,S,le,me,De,he,ie,qe,ne,Ce,Se,ye,lt,de,Ft,Ye,Pt,Lt,it,ce,_e,He,Ee,Mt,Ze,Dt,pt,Te,ft,Oe,Ct,mt,xe,ht,Ie,Ot,ct,pe,Je,It,Ut,Ke,Nt,Bt,ze,Wt,Re,Yt,Ht,ut,Ae,$t,ae,Zt,Fe,Jt,Kt,Ge,Rt,Gt,Xe,Xt,Qt,dt,Pe,_t,ge,gt,Q,Vt,Ue,ea,ta,Qe,aa,sa,Ve,ra,na,et,oa,la,kt,ke,wt,ue,we,tt,Le,ia,at,pa,vt,ve,bt,be,jt;return $=new xt({}),L=new Oa({props:{id:"1JvfrvZgi6c"}}),C=new zt({props:{$$slots:{default:[Ya]},$$scope:{ctx:F}}}),I=new xt({}),S=new K({props:{code:`from datasets import load_dataset books = load_dataset("opus_books", "en-fr")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>books = load_dataset(<span class="hljs-string">&quot;opus_books&quot;</span>, <span class="hljs-string">&quot;en-fr&quot;</span>)`}}),ie=new K({props:{code:'books = books["train"].train_test_split(test_size=0.2)',highlighted:'books = books[<span class="hljs-string">&quot;train&quot;</span>].train_test_split(test_size=<span 
class="hljs-number">0.2</span>)'}}),ye=new K({props:{code:'books["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>books[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;90560&#x27;</span>, <span class="hljs-string">&#x27;translation&#x27;</span>: {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-string">&#x27;But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.&#x27;</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-string">&#x27;Mais ce plateau \xE9lev\xE9 ne mesurait que quelques toises, et bient\xF4t nous f\xFBmes rentr\xE9s dans notre \xE9l\xE9ment.&#x27;</span>}}`}}),Ee=new xt({}),Te=new Oa({props:{id:"XAR8jnZZuUs"}}),xe=new K({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),Ae=new K({props:{code:`source_lang = "en" target_lang = "fr" prefix = "translate English to French: " def preprocess_function(examples): inputs = [prefix + example[source_lang] for example in examples["translation"]] targets = [example[target_lang] for example in examples["translation"]] model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True) return model_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>source_lang = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_lang = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;translate English to French: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> inputs = [prefix + example[source_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> targets = [example[target_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, text_target=targets, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> model_inputs`}}),Pe=new K({props:{code:"tokenized_books = books.map(preprocess_function, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_books = books.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),ge=new fa({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Ka],pytorch:[Za]},$$scope:{ctx:F}}}),ke=new fa({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Qa],pytorch:[Ga]},$$scope:{ctx:F}}}),Le=new xt({}),ve=new fa({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[rs],pytorch:[ts]},$$scope:{ctx:F}}}),be=new zt({props:{$$slots:{default:[ns]},$$scope:{ctx:F}}}),{c(){a=p("meta"),l=w(),t=p("h1"),i=p("a"),_=p("span"),y($.$$.fragment),g=w(),q=p("span"),d=n("Translation"),k=w(),y(L.$$.fragment),M=w(),U=p("p"),R=n("Translation converts a sequence of text from one language to another. It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks."),Z=w(),N=p("p"),J=n("This guide will show you how to fine-tune "),H=p("a"),O=n("T5"),se=n(" on the English-French subset of the "),D=p("a"),V=n("OPUS Books"),re=n(" dataset to translate English text to French."),ee=w(),y(C.$$.fragment),W=w(),b=p("h2"),P=p("a"),B=p("span"),y(I.$$.fragment),te=w(),G=p("span"),oe=n("Load OPUS Books dataset"),X=w(),Y=p("p"),$e=n("Load the OPUS Books dataset from the \u{1F917} Datasets library:"),h=w(),y(S.$$.fragment),le=w(),me=p("p"),De=n("Split this dataset into a train and test set:"),he=w(),y(ie.$$.fragment),qe=w(),ne=p("p"),Ce=n("Then take a look at an example:"),Se=w(),y(ye.$$.fragment),lt=w(),de=p("p"),Ft=n("The "),Ye=p("code"),Pt=n("translation"),Lt=n(" field is a dictionary containing the English and French translations of the text."),it=w(),ce=p("h2"),_e=p("a"),He=p("span"),y(Ee.$$.fragment),Mt=w(),Ze=p("span"),Dt=n("Preprocess"),pt=w(),y(Te.$$.fragment),ft=w(),Oe=p("p"),Ct=n("Load the T5 tokenizer to process the language pairs:"),mt=w(),y(xe.$$.fragment),ht=w(),Ie=p("p"),Ot=n("The preprocessing function needs to:"),ct=w(),pe=p("ol"),Je=p("li"),It=n("Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks."),Ut=w(),Ke=p("li"),Nt=n("Tokenize the input (English) and target (French) separately. You can\u2019t tokenize French text with a tokenizer pretrained on an English vocabulary. A context manager will help set the tokenizer to French first before tokenizing it."),Bt=w(),ze=p("li"),Wt=n("Truncate sequences to be no longer than the maximum length set by the "),Re=p("code"),Yt=n("max_length"),Ht=n(" parameter."),ut=w(),y(Ae.$$.fragment),$t=w(),ae=p("p"),Zt=n("Use \u{1F917} Datasets "),Fe=p("a"),Jt=n("map"),Kt=n(" function to apply the preprocessing function over the entire dataset. You can speed up the "),Ge=p("code"),Rt=n("map"),Gt=n(" function by setting "),Xe=p("code"),Xt=n("batched=True"),Qt=n(" to process multiple elements of the dataset at once:"),dt=w(),y(Pe.$$.fragment),_t=w(),y(ge.$$.fragment),gt=w(),Q=p("p"),Vt=n("Use "),Ue=p("a"),ea=n("DataCollatorForSeq2Seq"),ta=n(" to create a batch of examples. It will also "),Qe=p("em"),aa=n("dynamically pad"),sa=n(" your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),Ve=p("code"),ra=n("tokenizer"),na=n(" function by setting "),et=p("code"),oa=n("padding=True"),la=n(", dynamic padding is more efficient."),kt=w(),y(ke.$$.fragment),wt=w(),ue=p("h2"),we=p("a"),tt=p("span"),y(Le.$$.fragment),ia=w(),at=p("span"),pa=n("Train"),vt=w(),y(ve.$$.fragment),bt=w(),y(be.$$.fragment),this.h()},l(e){const c=Ba('[data-svelte="svelte-1phssyn"]',document.head);a=f(c,"META",{name:!0,content:!0}),c.forEach(s),l=v(e),t=f(e,"H1",{class:!0});var Me=m(t);i=f(Me,"A",{id:!0,class:!0,href:!0});var st=m(i);_=f(st,"SPAN",{});var rt=m(_);E($.$$.fragment,rt),rt.forEach(s),st.forEach(s),g=v(Me),q=f(Me,"SPAN",{});var nt=m(q);d=o(nt,"Translation"),nt.forEach(s),Me.forEach(s),k=v(e),E(L.$$.fragment,e),M=v(e),U=f(e,"P",{});var ot=m(U);R=o(ot,"Translation converts a sequence of text from one language to another. It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks."),ot.forEach(s),Z=v(e),N=f(e,"P",{});var Ne=m(N);J=o(Ne,"This guide will show you how to fine-tune "),H=f(Ne,"A",{href:!0,rel:!0});var ma=m(H);O=o(ma,"T5"),ma.forEach(s),se=o(Ne," on the English-French subset of the "),D=f(Ne,"A",{href:!0,rel:!0});var ha=m(D);V=o(ha,"OPUS Books"),ha.forEach(s),re=o(Ne," dataset to translate English text to French."),Ne.forEach(s),ee=v(e),E(C.$$.fragment,e),W=v(e),b=f(e,"H2",{class:!0});var qt=m(b);P=f(qt,"A",{id:!0,class:!0,href:!0});var ca=m(P);B=f(ca,"SPAN",{});var ua=m(B);E(I.$$.fragment,ua),ua.forEach(s),ca.forEach(s),te=v(qt),G=f(qt,"SPAN",{});var $a=m(G);oe=o($a,"Load OPUS Books dataset"),$a.forEach(s),qt.forEach(s),X=v(e),Y=f(e,"P",{});var da=m(Y);$e=o(da,"Load the OPUS Books dataset from the \u{1F917} Datasets library:"),da.forEach(s),h=v(e),E(S.$$.fragment,e),le=v(e),me=f(e,"P",{});var _a=m(me);De=o(_a,"Split this dataset into a train and test set:"),_a.forEach(s),he=v(e),E(ie.$$.fragment,e),qe=v(e),ne=f(e,"P",{});var ga=m(ne);Ce=o(ga,"Then take a look at an example:"),ga.forEach(s),Se=v(e),E(ye.$$.fragment,e),lt=v(e),de=f(e,"P",{});var St=m(de);Ft=o(St,"The "),Ye=f(St,"CODE",{});var ka=m(Ye);Pt=o(ka,"translation"),ka.forEach(s),Lt=o(St," field is a dictionary containing the English and French translations of the text."),St.forEach(s),it=v(e),ce=f(e,"H2",{class:!0});var yt=m(ce);_e=f(yt,"A",{id:!0,class:!0,href:!0});var wa=m(_e);He=f(wa,"SPAN",{});var va=m(He);E(Ee.$$.fragment,va),va.forEach(s),wa.forEach(s),Mt=v(yt),Ze=f(yt,"SPAN",{});var ba=m(Ze);Dt=o(ba,"Preprocess"),ba.forEach(s),yt.forEach(s),pt=v(e),E(Te.$$.fragment,e),ft=v(e),Oe=f(e,"P",{});var ja=m(Oe);Ct=o(ja,"Load the T5 tokenizer to process the language pairs:"),ja.forEach(s),mt=v(e),E(xe.$$.fragment,e),ht=v(e),Ie=f(e,"P",{});var qa=m(Ie);Ot=o(qa,"The preprocessing function needs to:"),qa.forEach(s),ct=v(e),pe=f(e,"OL",{});var Be=m(pe);Je=f(Be,"LI",{});var Sa=m(Je);It=o(Sa,"Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks."),Sa.forEach(s),Ut=v(Be),Ke=f(Be,"LI",{});var ya=m(Ke);Nt=o(ya,"Tokenize the input (English) and target (French) separately. You can\u2019t tokenize French text with a tokenizer pretrained on an English vocabulary. 
A context manager will help set the tokenizer to French first before tokenizing it."),ya.forEach(s),Bt=v(Be),ze=f(Be,"LI",{});var Et=m(ze);Wt=o(Et,"Truncate sequences to be no longer than the maximum length set by the "),Re=f(Et,"CODE",{});var Ea=m(Re);Yt=o(Ea,"max_length"),Ea.forEach(s),Ht=o(Et," parameter."),Et.forEach(s),Be.forEach(s),ut=v(e),E(Ae.$$.fragment,e),$t=v(e),ae=f(e,"P",{});var je=m(ae);Zt=o(je,"Use \u{1F917} Datasets "),Fe=f(je,"A",{href:!0,rel:!0});var Ta=m(Fe);Jt=o(Ta,"map"),Ta.forEach(s),Kt=o(je," function to apply the preprocessing function over the entire dataset. You can speed up the "),Ge=f(je,"CODE",{});var xa=m(Ge);Rt=o(xa,"map"),xa.forEach(s),Gt=o(je," function by setting "),Xe=f(je,"CODE",{});var za=m(Xe);Xt=o(za,"batched=True"),za.forEach(s),Qt=o(je," to process multiple elements of the dataset at once:"),je.forEach(s),dt=v(e),E(Pe.$$.fragment,e),_t=v(e),E(ge.$$.fragment,e),gt=v(e),Q=f(e,"P",{});var fe=m(Q);Vt=o(fe,"Use "),Ue=f(fe,"A",{href:!0});var Aa=m(Ue);ea=o(Aa,"DataCollatorForSeq2Seq"),Aa.forEach(s),ta=o(fe," to create a batch of examples. It will also "),Qe=f(fe,"EM",{});var Fa=m(Qe);aa=o(Fa,"dynamically pad"),Fa.forEach(s),sa=o(fe," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Ve=f(fe,"CODE",{});var Pa=m(Ve);ra=o(Pa,"tokenizer"),Pa.forEach(s),na=o(fe," function by setting "),et=f(fe,"CODE",{});var La=m(et);oa=o(La,"padding=True"),La.forEach(s),la=o(fe,", dynamic padding is more efficient."),fe.forEach(s),kt=v(e),E(ke.$$.fragment,e),wt=v(e),ue=f(e,"H2",{class:!0});var Tt=m(ue);we=f(Tt,"A",{id:!0,class:!0,href:!0});var Ma=m(we);tt=f(Ma,"SPAN",{});var Da=m(tt);E(Le.$$.fragment,Da),Da.forEach(s),Ma.forEach(s),ia=v(Tt),at=f(Tt,"SPAN",{});var Ca=m(at);pa=o(Ca,"Train"),Ca.forEach(s),Tt.forEach(s),vt=v(e),E(ve.$$.fragment,e),bt=v(e),E(be.$$.fragment,e),this.h()},h(){j(a,"name","hf:doc:metadata"),j(a,"content",JSON.stringify(ls)),j(i,"id","translation"),j(i,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(i,"href","#translation"),j(t,"class","relative group"),j(H,"href","https://huggingface.co/t5-small"),j(H,"rel","nofollow"),j(D,"href","https://huggingface.co/datasets/opus_books"),j(D,"rel","nofollow"),j(P,"id","load-opus-books-dataset"),j(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(P,"href","#load-opus-books-dataset"),j(b,"class","relative group"),j(_e,"id","preprocess"),j(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(_e,"href","#preprocess"),j(ce,"class","relative group"),j(Fe,"href","https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map"),j(Fe,"rel","nofollow"),j(Ue,"href","/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq"),j(we,"id","train"),j(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),j(we,"href","#train"),j(ue,"class","relative 
group")},m(e,c){r(document.head,a),u(e,l,c),u(e,t,c),r(t,i),r(i,_),T($,_,null),r(t,g),r(t,q),r(q,d),u(e,k,c),T(L,e,c),u(e,M,c),u(e,U,c),r(U,R),u(e,Z,c),u(e,N,c),r(N,J),r(N,H),r(H,O),r(N,se),r(N,D),r(D,V),r(N,re),u(e,ee,c),T(C,e,c),u(e,W,c),u(e,b,c),r(b,P),r(P,B),T(I,B,null),r(b,te),r(b,G),r(G,oe),u(e,X,c),u(e,Y,c),r(Y,$e),u(e,h,c),T(S,e,c),u(e,le,c),u(e,me,c),r(me,De),u(e,he,c),T(ie,e,c),u(e,qe,c),u(e,ne,c),r(ne,Ce),u(e,Se,c),T(ye,e,c),u(e,lt,c),u(e,de,c),r(de,Ft),r(de,Ye),r(Ye,Pt),r(de,Lt),u(e,it,c),u(e,ce,c),r(ce,_e),r(_e,He),T(Ee,He,null),r(ce,Mt),r(ce,Ze),r(Ze,Dt),u(e,pt,c),T(Te,e,c),u(e,ft,c),u(e,Oe,c),r(Oe,Ct),u(e,mt,c),T(xe,e,c),u(e,ht,c),u(e,Ie,c),r(Ie,Ot),u(e,ct,c),u(e,pe,c),r(pe,Je),r(Je,It),r(pe,Ut),r(pe,Ke),r(Ke,Nt),r(pe,Bt),r(pe,ze),r(ze,Wt),r(ze,Re),r(Re,Yt),r(ze,Ht),u(e,ut,c),T(Ae,e,c),u(e,$t,c),u(e,ae,c),r(ae,Zt),r(ae,Fe),r(Fe,Jt),r(ae,Kt),r(ae,Ge),r(Ge,Rt),r(ae,Gt),r(ae,Xe),r(Xe,Xt),r(ae,Qt),u(e,dt,c),T(Pe,e,c),u(e,_t,c),T(ge,e,c),u(e,gt,c),u(e,Q,c),r(Q,Vt),r(Q,Ue),r(Ue,ea),r(Q,ta),r(Q,Qe),r(Qe,aa),r(Q,sa),r(Q,Ve),r(Ve,ra),r(Q,na),r(Q,et),r(et,oa),r(Q,la),u(e,kt,c),T(ke,e,c),u(e,wt,c),u(e,ue,c),r(ue,we),r(we,tt),T(Le,tt,null),r(ue,ia),r(ue,at),r(at,pa),u(e,vt,c),T(ve,e,c),u(e,bt,c),T(be,e,c),jt=!0},p(e,[c]){const Me={};c&2&&(Me.$$scope={dirty:c,ctx:e}),C.$set(Me);const st={};c&2&&(st.$$scope={dirty:c,ctx:e}),ge.$set(st);const rt={};c&2&&(rt.$$scope={dirty:c,ctx:e}),ke.$set(rt);const nt={};c&2&&(nt.$$scope={dirty:c,ctx:e}),ve.$set(nt);const ot={};c&2&&(ot.$$scope={dirty:c,ctx:e}),be.$set(ot)},i(e){jt||(x($.$$.fragment,e),x(L.$$.fragment,e),x(C.$$.fragment,e),x(I.$$.fragment,e),x(S.$$.fragment,e),x(ie.$$.fragment,e),x(ye.$$.fragment,e),x(Ee.$$.fragment,e),x(Te.$$.fragment,e),x(xe.$$.fragment,e),x(Ae.$$.fragment,e),x(Pe.$$.fragment,e),x(ge.$$.fragment,e),x(ke.$$.fragment,e),x(Le.$$.fragment,e),x(ve.$$.fragment,e),x(be.$$.fragment,e),jt=!0)},o(e){z($.$$.fragment,e),z(L.$$.fragment,e),z(C.$$.fragment,e),z(I.$$.fragment,e),z(S.$$.fragment,e),z(ie.$$.fragment,e),z(ye.$$.fragment,e),z(Ee.$$.fragment,e),z(Te.$$.fragment,e),z(xe.$$.fragment,e),z(Ae.$$.fragment,e),z(Pe.$$.fragment,e),z(ge.$$.fragment,e),z(ke.$$.fragment,e),z(Le.$$.fragment,e),z(ve.$$.fragment,e),z(be.$$.fragment,e),jt=!1},d(e){s(a),e&&s(l),e&&s(t),A($),e&&s(k),A(L,e),e&&s(M),e&&s(U),e&&s(Z),e&&s(N),e&&s(ee),A(C,e),e&&s(W),e&&s(b),A(I),e&&s(X),e&&s(Y),e&&s(h),A(S,e),e&&s(le),e&&s(me),e&&s(he),A(ie,e),e&&s(qe),e&&s(ne),e&&s(Se),A(ye,e),e&&s(lt),e&&s(de),e&&s(it),e&&s(ce),A(Ee),e&&s(pt),A(Te,e),e&&s(ft),e&&s(Oe),e&&s(mt),A(xe,e),e&&s(ht),e&&s(Ie),e&&s(ct),e&&s(pe),e&&s(ut),A(Ae,e),e&&s($t),e&&s(ae),e&&s(dt),A(Pe,e),e&&s(_t),A(ge,e),e&&s(gt),e&&s(Q),e&&s(kt),A(ke,e),e&&s(wt),e&&s(ue),A(Le),e&&s(vt),A(ve,e),e&&s(bt),A(be,e)}}}const ls={local:"translation",sections:[{local:"load-opus-books-dataset",title:"Load OPUS Books dataset"},{local:"preprocess",title:"Preprocess"},{local:"train",title:"Train"}],title:"Translation"};function is(F){return Wa(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class $s extends Ia{constructor(a){super();Ua(this,a,is,os,Na,{})}}export{$s as default,ls as metadata};
36
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/generation_utils.mdx-hf-doc-builder.js
import{S as x0,i as w0,s as L0,e as o,k as c,w as f,t as a,M as E0,c as n,d as t,m as l,a as s,x as g,h as i,b as d,G as r,g as m,y as u,q as h,o as _,B as b,v as P0,L as F0}from"../../chunks/vendor-hf-doc-builder.js";import{D as v}from"../../chunks/Docstring-hf-doc-builder.js";import{C as n$}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Ae}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as D0}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function S0(kd){let w,Re;return w=new n$({props:{code:`completed = False while not completed: _, completed = constraint.update(constraint.advance())`,highlighted:`completed = <span class="hljs-literal">False</span> <span class="hljs-keyword">while</span> <span class="hljs-keyword">not</span> completed: _, completed = constraint.update(constraint.advance())`}}),{c(){f(w.$$.fragment)},l(k){g(w.$$.fragment,k)},m(k,Ce){u(w,k,Ce),Re=!0},p:F0,i(k){Re||(h(w.$$.fragment,k),Re=!0)},o(k){_(w.$$.fragment,k),Re=!1},d(k){b(w,k)}}}function z0(kd){let w,Re,k,Ce,mc,at,Xm,fc,Jm,xd,y,Qm,Un,Zm,ef,Yn,rf,tf,Xn,of,nf,Jn,sf,af,Qn,cf,lf,Zn,df,pf,es,mf,ff,wd,rs,gf,Ld,Ke,_r,gc,it,uf,uc,hf,Ed,V,_f,ts,bf,vf,os,$f,Tf,ns,yf,kf,Pd,ss,xf,Fd,ct,Dd,We,wf,hc,Lf,Ef,as,Pf,Ff,Sd,M,is,_c,Df,Sf,zf,cs,bc,Of,qf,Bf,ls,vc,If,Af,Cf,ds,$c,Wf,Nf,zd,x,Vf,Tc,Mf,Gf,yc,jf,Hf,kc,Rf,Kf,xc,Uf,Yf,wc,Xf,Jf,Lc,Qf,Zf,Od,S,eg,Ec,rg,tg,Pc,og,ng,Fc,sg,ag,Dc,ig,cg,qd,z,lg,Sc,dg,pg,zc,mg,fg,Oc,gg,ug,qc,hg,_g,Bd,lt,Id,br,bg,Bc,vg,$g,Ad,O,Tg,Ic,yg,kg,Ac,xg,wg,Cc,Lg,Eg,Wc,Pg,Fg,Cd,ps,Dg,Wd,Ue,vr,Nc,dt,Sg,Vc,zg,Nd,Ye,pt,Og,Mc,qg,Vd,Xe,mt,Bg,Gc,Ig,Md,R,ft,Ag,jc,Cg,Wg,$r,gt,Ng,Hc,Vg,Gd,Je,Tr,Rc,ut,Mg,Kc,Gg,jd,Qe,ht,jg,Uc,Hg,Hd,Ze,_t,Rg,Yc,Kg,Rd,K,bt,Ug,Xc,Yg,Xg,yr,vt,Jg,Jc,Qg,Kd,er,kr,Qc,$t,Zg,Zc,eu,Ud,rr,Tt,ru,el,tu,Yd,tr,yt,ou,rl,nu,Xd,or,xr,tl,kt,su,ol,au,Jd,nr,xt,iu,nl,cu,Qd,sr,wt,lu,sl,du,Zd,ar,wr,al,Lt,pu,il,mu,ep,Lr,fu,ms,gu,uu,rp,U,Et,hu,cl,_u,bu,Er,Pt,vu,ll,$u,tp,Y,Ft,Tu,L,yu,fs,ku,xu,gs,wu,Lu,dl,Eu,Pu,pl,ml,Fu,Du,us,Su,zu,hs,Ou,qu,Bu,_s,Dt,op,X,St,Iu,fl,Au,Cu,Pr,zt,Wu,gl,Nu,np,J,Ot,Vu,bs,vs,Mu,Gu,ju,$s,qt,sp,Q,Bt,Hu,Ts,ys,Ru,Ku,Uu,ks,It,ap,Z,At,Yu,xs,ws,Xu,Ju,Qu,Ls,Ct,ip,ee,Wt,Zu,Es,Ps,eh,rh,th,Fs,Nt,cp,re,Vt,oh,Ds,Ss,nh,sh,ah,zs,Mt,lp,te,Gt,ih,Fr,Os,ch,lh,jt,dh,ph,mh,qs,Ht,dp,oe,Rt,fh,Dr,Bs,gh,uh,Kt,hh,_h,bh,Is,Ut,pp,ne,Yt,vh,As,Cs,$h,Th,yh,Ws,Xt,mp,se,Jt,kh,Sr,Ns,xh,wh,Qt,Lh,Eh,Ph,Vs,Zt,fp,ae,eo,Fh,Ne,Ms,Dh,Sh,Gs,zh,Oh,ro,qh,Bh,Ih,js,to,gp,ie,oo,Ah,Hs,Rs,Ch,Wh,Nh,Ks,no,up,ce,so,Vh,zr,Us,Mh,Gh,ul,jh,Hh,Rh,Ys,ao,hp,le,io,Kh,G,Xs,Uh,Yh,hl,Xh,Jh,_l,Qh,Zh,bl,e_,r_,t_,Js,co,_p,de,lo,o_,vl,n_,s_,Or,po,a_,$l,i_,bp,pe,mo,c_,A,l_,Qs,d_,p_,Tl,m_,f_,yl,kl,g_,u_,Zs,h_,__,b_,ea,fo,vp,me,go,v_,xl,$_,T_,qr,uo,y_,wl,k_,$p,fe,ho,x_,ra,ta,w_,L_,E_,oa,_o,Tp,ge,bo,P_,na,sa,F_,D_,S_,aa,vo,yp,ue,$o,z_,ia,ca,O_,q_,B_,la,To,kp,he,yo,I_,da,pa,A_,C_,W_,ma,ko,xp,_e,xo,N_,fa,ga,V_,M_,G_,ua,wo,wp,be,Lo,j_,Br,ha,H_,R_,Eo,K_,U_,Y_,_a,Po,Lp,ve,Fo,X_,ba,va,J_,Q_,Z_,$a,Do,Ep,$e,So,eb,Ta,ya,rb,tb,ob,ka,zo,Pp,Te,Oo,nb,Ir,xa,sb,ab,Ll,ib,cb,lb,wa,qo,Fp,ye,Bo,db,El,pb,mb,Ar,Io,fb,Pl,gb,Dp,ke,Ao,ub,E,hb,La,_b,bb,Ea,vb,$b,Fl,Tb,yb,Dl,Sl,kb,xb,Pa,wb,Lb,Fa,Eb,Pb,Fb,Da,Co,Sp,xe,Wo,Db,zl,Sb,zb,Cr,No,Ob,Ol,qb,zp,we,Vo,Bb,Sa,za,Ib,Ab,Cb,Oa,Mo,Op,Le,Go,Wb,qa,Ba,Nb,Vb,Mb,Ia,jo,qp,Ee,Ho,Gb,Aa,Ca,jb,Hb,Rb,Wa,Ro,Bp,Pe,Ko,Kb,Na,Va,Ub,Yb,Xb,Ma,Uo,Ip,Fe,Yo,Jb,Wr,Ga,Qb,Zb,ql,ev,rv,tv,ja,Xo,Ap,De,Jo,ov,Ha,Ra,nv,sv,av,Ka,Qo,Cp,ir,Nr,Bl,Zo,iv,Il,cv,Wp,Vr,lv,Ua,dv,pv,Np,Se,en,mv,Al,fv,gv,Ya,rn,Vp,cr,tn,uv,Xa,on,Mp,ze,nn,hv,sn,_v,Cl,bv,vv,$v,Ja,an,Gp,Oe,cn,Tv,ln,yv,Wl,kv,xv,wv,Qa,dn,jp,lr,Mr,Nl,pn,Lv,Vl,Ev,Hp,Gr,Pv,Za,Fv,Dv,Rp,$,mn,Sv,Ml,zv,
Ov,Gl,qv,Bv,jr,Iv,jl,Av,Cv,Hr,fn,Wv,Hl,Nv,Vv,Rr,gn,Mv,Rl,Gv,jv,Kr,un,Hv,Kl,Rv,Kv,Ur,hn,Uv,_n,Yv,Ul,Xv,Jv,Qv,Yr,bn,Zv,Yl,e1,r1,Xr,vn,t1,Xl,o1,n1,Ve,$n,s1,Tn,a1,Jl,i1,c1,l1,Ql,d1,Kp,dr,yn,p1,ei,ri,m1,f1,Up,pr,kn,g1,xn,u1,ti,h1,_1,Yp,C,wn,b1,Zl,v1,$1,F,Ln,T1,ed,y1,k1,mr,x1,rd,w1,L1,td,E1,P1,F1,od,nd,D1,S1,qe,z1,sd,O1,q1,ad,B1,I1,id,A1,C1,W1,cd,N1,V1,Jr,En,M1,ld,G1,Xp,fr,Qr,dd,Pn,j1,pd,H1,Jp,W,Fn,R1,gr,K1,oi,U1,Y1,ni,X1,J1,Q1,si,Dn,Z1,ai,Sn,Qp,P,zn,e2,ii,ci,r2,t2,o2,On,n2,qn,s2,a2,i2,li,c2,Bn,l2,d2,di,In,p2,pi,An,Zp,N,Cn,m2,mi,fi,f2,g2,u2,gi,Wn,h2,ui,Nn,em,ur,Zr,md,Vn,_2,fd,b2,rm,Be,Mn,v2,gd,$2,T2,hi,y2,Gn,k2,tm,Ie,jn,x2,ud,w2,L2,_i,E2,Hn,P2,om;return at=new Ae({}),it=new Ae({}),ct=new n$({props:{code:`from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt") generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute and &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generation_output = model.generate(**inputs, return_dict_in_generate=<span class="hljs-literal">True</span>, output_scores=<span class="hljs-literal">True</span>)`}}),lt=new n$({props:{code:"generation_output[:2]",highlighted:'generation_output[:<span class="hljs-number">2</span>]'}}),dt=new Ae({}),pt=new v({props:{name:"class transformers.generation_utils.GreedySearchDecoderOnlyOutput",anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.`,name:"hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L72"}}),mt=new v({props:{name:"class transformers.generation_utils.GreedySearchEncoderDecoderOutput",anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L100"}}),ft=new v({props:{name:"class transformers.generation_flax_utils.FlaxGreedySearchOutput",anchor:"transformers.generation_flax_utils.FlaxGreedySearchOutput",parameters:[{name:"sequences",val:": ndarray = None"}],parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences",description:`<strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated 
sequences.`,name:"sequences"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L51"}}),gt=new v({props:{name:"replace",anchor:"transformers.generation_flax_utils.FlaxGreedySearchOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),ut=new Ae({}),ht=new v({props:{name:"class transformers.generation_utils.SampleDecoderOnlyOutput",anchor:"transformers.generation_utils.SampleDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, generated_length, hidden_size)</code>.`,name:"hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L142"}}),_t=new v({props:{name:"class transformers.generation_utils.SampleEncoderDecoderOutput",anchor:"transformers.generation_utils.SampleEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_return_sequences, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; 
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L171"}}),bt=new v({props:{name:"class transformers.generation_flax_utils.FlaxSampleOutput",anchor:"transformers.generation_flax_utils.FlaxSampleOutput",parameters:[{name:"sequences",val:": ndarray = None"}],parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxSampleOutput.sequences",description:`<strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.`,name:"sequences"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L65"}}),vt=new v({props:{name:"replace",anchor:"transformers.generation_flax_utils.FlaxSampleOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108"}}),$t=new Ae({}),Tt=new v({props:{name:"class transformers.generation_utils.BeamSearchDecoderOnlyOutput",anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, input_ids.shape[-1])</code>.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.`,name:"hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L214"}}),yt=new v({props:{name:"class transformers.generation_utils.BeamSearchEncoderDecoderOutput",anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = 
None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, max_length-1)</code>.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions",description:"<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014;",name:"attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L249"}}),kt=new Ae({}),xt=new v({props:{name:"class 
transformers.generation_utils.BeamSampleDecoderOnlyOutput",anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>.`,name:"scores"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, input_ids.shape[-1])</code>.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.`,name:"hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L300"}}),wt=new v({props:{name:"class transformers.generation_utils.BeamSampleEncoderDecoderOutput",anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],parametersDescription:[{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_beams, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. 
Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>torch.LongTensor</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, max_length-1)</code>.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, 
hidden_size)</code>.`,name:"decoder_hidden_states"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L335"}}),Lt=new Ae({}),Et=new v({props:{name:"class transformers.LogitsProcessor",anchor:"transformers.LogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L51"}}),Pt=new v({props:{name:"__call__",anchor:"transformers.LogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],parametersDescription:[{anchor:"transformers.LogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L54",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Ft=new v({props:{name:"class transformers.LogitsProcessorList",anchor:"transformers.LogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L73"}}),Dt=new v({props:{name:"__call__",anchor:"transformers.LogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.LogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L80",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),St=new v({props:{name:"class transformers.LogitsWarper",anchor:"transformers.LogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L62"}}),zt=new v({props:{name:"__call__",anchor:"transformers.LogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],parametersDescription:[{anchor:"transformers.LogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L65",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Ot=new v({props:{name:"class transformers.MinLengthLogitsProcessor",anchor:"transformers.MinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.MinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.MinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L96"}}),qt=new v({props:{name:"__call__",anchor:"transformers.MinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L117"}}),Bt=new v({props:{name:"class transformers.TemperatureLogitsWarper",anchor:"transformers.TemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],parametersDescription:[{anchor:"transformers.TemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to modulate the logits distribution.`,name:"temperature"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L124"}}),It=new v({props:{name:"__call__",anchor:"transformers.TemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L139"}}),At=new v({props:{name:"class transformers.RepetitionPenaltyLogitsProcessor",anchor:"transformers.RepetitionPenaltyLogitsProcessor",parameters:[{name:"penalty",val:": float"}],parametersDescription:[{anchor:"transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L144"}}),Ct=new v({props:{name:"__call__",anchor:"transformers.RepetitionPenaltyLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L160"}}),Wt=new v({props:{name:"class transformers.TopPLogitsWarper",anchor:"transformers.TopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.TopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.TopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L170"}}),Nt=new v({props:{name:"__call__",anchor:"transformers.TopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L193"}}),Vt=new v({props:{name:"class transformers.TopKLogitsWarper",anchor:"transformers.TopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.TopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.TopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L209"}}),Mt=new v({props:{name:"__call__",anchor:"transformers.TopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L230"}}),Gt=new v({props:{name:"class transformers.TypicalLogitsWarper",anchor:"transformers.TypicalLogitsWarper",parameters:[{name:"mass",val:": float = 
0.9"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.TypicalLogitsWarper.mass",description:`<strong>mass</strong> (<code>float</code>) &#x2014; Value of typical_p between 0 and 1 inclusive, defaults to 0.9.`,name:"mass"},{anchor:"transformers.TypicalLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TypicalLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L238"}}),Ht=new v({props:{name:"__call__",anchor:"transformers.TypicalLogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L261"}}),Rt=new v({props:{name:"class transformers.NoRepeatNGramLogitsProcessor",anchor:"transformers.NoRepeatNGramLogitsProcessor",parameters:[{name:"ngram_size",val:": int"}],parametersDescription:[{anchor:"transformers.NoRepeatNGramLogitsProcessor.ngram_size",description:`<strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.`,name:"ngram_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L322"}}),Ut=new v({props:{name:"__call__",anchor:"transformers.NoRepeatNGramLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L337"}}),Yt=new v({props:{name:"class transformers.NoBadWordsLogitsProcessor",anchor:"transformers.NoBadWordsLogitsProcessor",parameters:[{name:"bad_words_ids",val:": typing.List[typing.List[int]]"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.NoBadWordsLogitsProcessor.bad_words_ids",description:`<strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. 
In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.`,name:"bad_words_ids"},{anchor:"transformers.NoBadWordsLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L389"}}),Xt=new v({props:{name:"__call__",anchor:"transformers.NoBadWordsLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L431"}}),Jt=new v({props:{name:"class transformers.PrefixConstrainedLogitsProcessor",anchor:"transformers.PrefixConstrainedLogitsProcessor",parameters:[{name:"prefix_allowed_tokens_fn",val:": typing.Callable[[int, torch.Tensor], typing.List[int]]"},{name:"num_beams",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L517"}}),Zt=new v({props:{name:"__call__",anchor:"transformers.PrefixConstrainedLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L534"}}),eo=new v({props:{name:"class transformers.HammingDiversityLogitsProcessor",anchor:"transformers.HammingDiversityLogitsProcessor",parameters:[{name:"diversity_penalty",val:": float"},{name:"num_beams",val:": int"},{name:"num_beam_groups",val:": int"}],parametersDescription:[{anchor:"transformers.HammingDiversityLogitsProcessor.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.`,name:"diversity_penalty"},{anchor:"transformers.HammingDiversityLogitsProcessor.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams used for group beam search. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beams"},{anchor:"transformers.HammingDiversityLogitsProcessor.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L543"}}),to=new v({props:{name:"__call__",anchor:"transformers.HammingDiversityLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"current_tokens",val:": LongTensor"},{name:"beam_group_idx",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L574"}}),oo=new v({props:{name:"class transformers.ForcedBOSTokenLogitsProcessor",anchor:"transformers.ForcedBOSTokenLogitsProcessor",parameters:[{name:"bos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.ForcedBOSTokenLogitsProcessor.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.`,name:"bos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L603"}}),no=new v({props:{name:"__call__",anchor:"transformers.ForcedBOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L615"}}),so=new v({props:{name:"class transformers.ForcedEOSTokenLogitsProcessor",anchor:"transformers.ForcedEOSTokenLogitsProcessor",parameters:[{name:"max_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.ForcedEOSTokenLogitsProcessor.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.ForcedEOSTokenLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L624"}}),ao=new v({props:{name:"__call__",anchor:"transformers.ForcedEOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L639"}}),io=new v({props:{name:"class transformers.InfNanRemoveLogitsProcessor",anchor:"transformers.InfNanRemoveLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L648"}}),co=new v({props:{name:"__call__",anchor:"transformers.InfNanRemoveLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L655"}}),lo=new v({props:{name:"class transformers.TFLogitsProcessor",anchor:"transformers.TFLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L53"}}),po=new v({props:{name:"__call__",anchor:"transformers.TFLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": 
int"}],parametersDescription:[{anchor:"transformers.TFLogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.`,name:"scores"},{anchor:"transformers.TFLogitsProcessor.__call__.cur_len",description:`<strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. kwargs &#x2014; Additional logits processor specific kwargs.`,name:"cur_len"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L56",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),mo=new v({props:{name:"class transformers.TFLogitsProcessorList",anchor:"transformers.TFLogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L75"}}),fo=new v({props:{name:"__call__",anchor:"transformers.TFLogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFLogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.`,name:"scores"},{anchor:"transformers.TFLogitsProcessorList.__call__.cur_len",description:`<strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. kwargs &#x2014; Additional logits processor specific kwargs.`,name:"cur_len"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L82",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),go=new v({props:{name:"class transformers.TFLogitsWarper",anchor:"transformers.TFLogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L64"}}),uo=new v({props:{name:"__call__",anchor:"transformers.TFLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],parametersDescription:[{anchor:"transformers.TFLogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.`,name:"scores"},{anchor:"transformers.TFLogitsWarper.__call__.cur_len",description:`<strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. 
kwargs &#x2014; Additional logits processor specific kwargs.`,name:"cur_len"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L67",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),ho=new v({props:{name:"class transformers.TFTemperatureLogitsWarper",anchor:"transformers.TFTemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],parametersDescription:[{anchor:"transformers.TFTemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.`,name:"temperature"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L98"}}),_o=new v({props:{name:"__call__",anchor:"transformers.TFTemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L113"}}),bo=new v({props:{name:"class transformers.TFTopPLogitsWarper",anchor:"transformers.TFTopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.TFTopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.TFTopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TFTopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L147"}}),vo=new v({props:{name:"__call__",anchor:"transformers.TFTopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L169"}}),$o=new v({props:{name:"class transformers.TFTopKLogitsWarper",anchor:"transformers.TFTopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.TFTopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.TFTopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TFTopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> 
(<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L118"}}),To=new v({props:{name:"__call__",anchor:"transformers.TFTopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L139"}}),yo=new v({props:{name:"class transformers.TFMinLengthLogitsProcessor",anchor:"transformers.TFMinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.TFMinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.TFMinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L201"}}),ko=new v({props:{name:"__call__",anchor:"transformers.TFMinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L227"}}),xo=new v({props:{name:"class transformers.TFNoBadWordsLogitsProcessor",anchor:"transformers.TFNoBadWordsLogitsProcessor",parameters:[{name:"bad_words_ids",val:": typing.List[typing.List[int]]"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.TFNoBadWordsLogitsProcessor.bad_words_ids",description:`<strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer(bad_word, add_prefix_space=True).input_ids</code>.`,name:"bad_words_ids"},{anchor:"transformers.TFNoBadWordsLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L287"}}),wo=new v({props:{name:"__call__",anchor:"transformers.TFNoBadWordsLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L364"}}),Lo=new v({props:{name:"class transformers.TFNoRepeatNGramLogitsProcessor",anchor:"transformers.TFNoRepeatNGramLogitsProcessor",parameters:[{name:"ngram_size",val:": int"}],parametersDescription:[{anchor:"transformers.TFNoRepeatNGramLogitsProcessor.ngram_size",description:`<strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.`,name:"ngram_size"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L385"}}),Po=new v({props:{name:"__call__",anchor:"transformers.TFNoRepeatNGramLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L424"}}),Fo=new v({props:{name:"class transformers.TFRepetitionPenaltyLogitsProcessor",anchor:"transformers.TFRepetitionPenaltyLogitsProcessor",parameters:[{name:"penalty",val:": float"}],parametersDescription:[{anchor:"transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L237"}}),Do=new v({props:{name:"__call__",anchor:"transformers.TFRepetitionPenaltyLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L279"}}),So=new v({props:{name:"class transformers.TFForcedBOSTokenLogitsProcessor",anchor:"transformers.TFForcedBOSTokenLogitsProcessor",parameters:[{name:"bos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.TFForcedBOSTokenLogitsProcessor.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.`,name:"bos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L446"}}),zo=new v({props:{name:"__call__",anchor:"transformers.TFForcedBOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L460"}}),Oo=new v({props:{name:"class transformers.TFForcedEOSTokenLogitsProcessor",anchor:"transformers.TFForcedEOSTokenLogitsProcessor",parameters:[{name:"max_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.TFForcedEOSTokenLogitsProcessor.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.TFForcedEOSTokenLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L476"}}),qo=new v({props:{name:"__call__",anchor:"transformers.TFForcedEOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L493"}}),Bo=new v({props:{name:"class transformers.FlaxLogitsProcessor",anchor:"transformers.FlaxLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L50"}}),Io=new v({props:{name:"__call__",anchor:"transformers.FlaxLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"}],parametersDescription:[{anchor:"transformers.FlaxLogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L53",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Ao=new v({props:{name:"class transformers.FlaxLogitsProcessorList",anchor:"transformers.FlaxLogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L72"}}),Co=new v({props:{name:"__call__",anchor:"transformers.FlaxLogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.FlaxLogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L79",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Wo=new v({props:{name:"class transformers.FlaxLogitsWarper",anchor:"transformers.FlaxLogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L61"}}),No=new v({props:{name:"__call__",anchor:"transformers.FlaxLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"}],parametersDescription:[{anchor:"transformers.FlaxLogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L64",returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Vo=new v({props:{name:"class transformers.FlaxTemperatureLogitsWarper",anchor:"transformers.FlaxTemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],parametersDescription:[{anchor:"transformers.FlaxTemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.`,name:"temperature"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L95"}}),Mo=new v({props:{name:"__call__",anchor:"transformers.FlaxTemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L110"}}),Go=new v({props:{name:"class transformers.FlaxTopPLogitsWarper",anchor:"transformers.FlaxTopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.FlaxTopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.FlaxTopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.FlaxTopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L115"}}),jo=new v({props:{name:"__call__",anchor:"transformers.FlaxTopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L137"}}),Ho=new v({props:{name:"class transformers.FlaxTopKLogitsWarper",anchor:"transformers.FlaxTopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.FlaxTopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.FlaxTopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will 
be set to this float value.`,name:"filter_value"},{anchor:"transformers.FlaxTopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L157"}}),Ro=new v({props:{name:"__call__",anchor:"transformers.FlaxTopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L178"}}),Ko=new v({props:{name:"class transformers.FlaxForcedBOSTokenLogitsProcessor",anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor",parameters:[{name:"bos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.`,name:"bos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L193"}}),Uo=new v({props:{name:"__call__",anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L205"}}),Yo=new v({props:{name:"class transformers.FlaxForcedEOSTokenLogitsProcessor",anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor",parameters:[{name:"max_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L215"}}),Xo=new v({props:{name:"__call__",anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L230"}}),Jo=new v({props:{name:"class transformers.FlaxMinLengthLogitsProcessor",anchor:"transformers.FlaxMinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],parametersDescription:[{anchor:"transformers.FlaxMinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.FlaxMinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> 
token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L240"}}),Qo=new v({props:{name:"__call__",anchor:"transformers.FlaxMinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L261"}}),Zo=new Ae({}),en=new v({props:{name:"class transformers.StoppingCriteria",anchor:"transformers.StoppingCriteria",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L33"}}),rn=new v({props:{name:"__call__",anchor:"transformers.StoppingCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.StoppingCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.StoppingCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L36",returnDescription:` <p><code>bool</code>. <code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),tn=new v({props:{name:"class transformers.StoppingCriteriaList",anchor:"transformers.StoppingCriteriaList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L110"}}),on=new v({props:{name:"__call__",anchor:"transformers.StoppingCriteriaList.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.StoppingCriteriaList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.StoppingCriteriaList.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L111",returnDescription:` <p><code>bool</code>. <code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),nn=new v({props:{name:"class transformers.MaxLengthCriteria",anchor:"transformers.MaxLengthCriteria",parameters:[{name:"max_length",val:": int"}],parametersDescription:[{anchor:"transformers.MaxLengthCriteria.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length that the output sequence can have in number of tokens.`,name:"max_length"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L41"}}),an=new v({props:{name:"__call__",anchor:"transformers.MaxLengthCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.MaxLengthCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MaxLengthCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L54",returnDescription:` <p><code>bool</code>. 
<code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),cn=new v({props:{name:"class transformers.MaxTimeCriteria",anchor:"transformers.MaxTimeCriteria",parameters:[{name:"max_time",val:": float"},{name:"initial_timestamp",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"transformers.MaxTimeCriteria.max_time",description:`<strong>max_time</strong> (<code>float</code>) &#x2014; The maximum allowed time in seconds for the generation.`,name:"max_time"},{anchor:"transformers.MaxTimeCriteria.initial_time",description:`<strong>initial_time</strong> (<code>float</code>, <em>optional</em>, defaults to <code>time.time()</code>) &#x2014; The start of the generation allowed time.`,name:"initial_time"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L88"}}),dn=new v({props:{name:"__call__",anchor:"transformers.MaxTimeCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.MaxTimeCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MaxTimeCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L105",returnDescription:` <p><code>bool</code>. 
<code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),pn=new Ae({}),mn=new v({props:{name:"class transformers.Constraint",anchor:"transformers.Constraint",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L5"}}),jr=new D0({props:{anchor:"transformers.Constraint.example",$$slots:{default:[S0]},$$scope:{ctx:kd}}}),fn=new v({props:{name:"advance",anchor:"transformers.Constraint.advance",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L48",returnDescription:` <p>Must be a tensor of a list of indexable tokens, not some integer.</p> `,returnType:` <p>token_ids(<code>torch.tensor</code>)</p> `}}),gn=new v({props:{name:"copy",anchor:"transformers.Constraint.copy",parameters:[{name:"stateful",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L113",returnDescription:` <p>The same constraint as the one being called from.</p> `,returnType:` <p>constraint(<code>Constraint</code>)</p> `}}),un=new v({props:{name:"does_advance",anchor:"transformers.Constraint.does_advance",parameters:[{name:"token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L60"}}),hn=new v({props:{name:"remaining",anchor:"transformers.Constraint.remaining",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L104"}}),bn=new v({props:{name:"reset",anchor:"transformers.Constraint.reset",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L94"}}),vn=new v({props:{name:"test",anchor:"transformers.Constraint.test",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L24"}}),$n=new v({props:{name:"update",anchor:"transformers.Constraint.update",parameters:[{name:"token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L69",returnDescription:` <p>Whether this constraint has become one step closer to being fulfilled. completed(<code>bool</code>): Whether this constraint has been completely fulfilled by this token being generated. 
reset (<code>bool</code>): Whether this constraint has reset its progress by this token being generated.</p> `,returnType:` <p>stepped(<code>bool</code>)</p> `}}),yn=new v({props:{name:"class transformers.PhrasalConstraint",anchor:"transformers.PhrasalConstraint",parameters:[{name:"token_ids",val:": typing.List[int]"}],parametersDescription:[{anchor:"transformers.PhrasalConstraint.token_ids",description:`<strong>token_ids</strong> (<code>List[int]</code>) &#x2014; The id of the token that must be generated by the output.`,name:"token_ids"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L129"}}),kn=new v({props:{name:"class transformers.DisjunctiveConstraint",anchor:"transformers.DisjunctiveConstraint",parameters:[{name:"nested_token_ids",val:": typing.List[typing.List[int]]"}],parametersDescription:[{anchor:"transformers.DisjunctiveConstraint.nested_token_ids",description:"<strong>nested_token_ids</strong> (<code>List[List[int]]</code>) &#x2014; a list of words, where each word is a list of ids. This constraint",name:"nested_token_ids"},{anchor:"transformers.DisjunctiveConstraint.is",description:"<strong>is</strong> fulfilled by generating just one from the list of words. &#x2014;",name:"is"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L261"}}),wn=new v({props:{name:"class transformers.ConstraintListState",anchor:"transformers.ConstraintListState",parameters:[{name:"constraints",val:": typing.List[transformers.generation_beam_constraints.Constraint]"}],parametersDescription:[{anchor:"transformers.ConstraintListState.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> objects that must be fulfilled by the beam scorer.`,name:"constraints"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L350"}}),Ln=new v({props:{name:"advance",anchor:"transformers.ConstraintListState.advance",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L382"}}),En=new v({props:{name:"reset",anchor:"transformers.ConstraintListState.reset",parameters:[{name:"token_ids",val:": typing.Optional[typing.List[int]]"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L417"}}),Pn=new Ae({}),Fn=new v({props:{name:"class transformers.BeamScorer",anchor:"transformers.BeamScorer",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L88"}}),Dn=new v({props:{name:"process",anchor:"transformers.BeamScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.BeamScorer.process.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BeamScorer.process.next_scores",description:`<strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_scores"},{anchor:"transformers.BeamScorer.process.next_tokens",description:`<strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_tokens"},{anchor:"transformers.BeamScorer.process.next_indices",description:`<strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.`,name:"next_indices"},{anchor:"transformers.BeamScorer.process.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.BeamScorer.process.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L94",returnDescription:` <p>A dictionary composed of the fields as defined above:</p> <ul> <li><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Updated scores of all non-finished beams.</li> <li><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Next tokens to be added to the non-finished beam_hypotheses.</li> <li><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Beam indices indicating to which beam the next tokens shall be added.</li> </ul> `,returnType:` <p><code>UserDict</code></p> `}}),Sn=new v({props:{name:"finalize",anchor:"transformers.BeamScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.BeamScorer.finalize.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BeamScorer.finalize.final_beam_scores",description:`<strong>final_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The final scores of all non-finished beams.`,name:"final_beam_scores"},{anchor:"transformers.BeamScorer.finalize.final_beam_tokens",description:`<strong>final_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The last tokens to be added to the non-finished beam_hypotheses.`,name:"final_beam_tokens"},{anchor:"transformers.BeamScorer.finalize.final_beam_indices",description:`<strong>final_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The beam indices indicating to which beam the <code>final_beam_tokens</code> shall be added.`,name:"final_beam_indices"},{anchor:"transformers.BeamScorer.finalize.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.BeamScorer.finalize.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L106",returnDescription:` <p>The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),zn=new v({props:{name:"class transformers.BeamSearchScorer",anchor:"transformers.BeamSearchScorer",parameters:[{name:"batch_size",val:": int"},{name:"num_beams",val:": int"},{name:"device",val:": device"},{name:"length_penalty",val:": typing.Optional[float] = 1.0"},{name:"do_early_stopping",val:": typing.Optional[bool] = False"},{name:"num_beam_hyps_to_keep",val:": typing.Optional[int] = 1"},{name:"num_beam_groups",val:": typing.Optional[int] = 1"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.BeamSearchScorer.batch_size",description:`<strong>batch_size</strong> (<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.`,name:"batch_size"},{anchor:"transformers.BeamSearchScorer.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.BeamSearchScorer.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.`,name:"num_beams"},{anchor:"transformers.BeamSearchScorer.device",description:`<strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.`,name:"device"},{anchor:"transformers.BeamSearchScorer.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.`,name:"length_penalty"},{anchor:"transformers.BeamSearchScorer.do_early_stopping",description:`<strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"do_early_stopping"},{anchor:"transformers.BeamSearchScorer.num_beam_hyps_to_keep",description:`<strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>~transformer.BeamSearchScorer.finalize</code>.`,name:"num_beam_hyps_to_keep"},{anchor:"transformers.BeamSearchScorer.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L120"}}),In=new v({props:{name:"process",anchor:"transformers.BeamSearchScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L208"}}),An=new v({props:{name:"finalize",anchor:"transformers.BeamSearchScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"final_beam_scores",val:": FloatTensor"},{name:"final_beam_tokens",val:": LongTensor"},{name:"final_beam_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"beam_indices",val:": typing.Optional[torch.LongTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L302"}}),Cn=new v({props:{name:"class transformers.ConstrainedBeamSearchScorer",anchor:"transformers.ConstrainedBeamSearchScorer",parameters:[{name:"batch_size",val:": int"},{name:"num_beams",val:": int"},{name:"constraints",val:": typing.List[transformers.generation_beam_constraints.Constraint]"},{name:"device",val:": device"},{name:"length_penalty",val:": typing.Optional[float] = 1.0"},{name:"do_early_stopping",val:": typing.Optional[bool] = False"},{name:"num_beam_hyps_to_keep",val:": typing.Optional[int] = 1"},{name:"num_beam_groups",val:": typing.Optional[int] = 1"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.ConstrainedBeamSearchScorer.batch_size",description:`<strong>batch_size</strong> (<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.`,name:"batch_size"},{anchor:"transformers.ConstrainedBeamSearchScorer.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.`,name:"num_beams"},{anchor:"transformers.ConstrainedBeamSearchScorer.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of positive constraints represented as <code>Constraint</code> objects that must be fulfilled in the generation output. 
For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> should be read.`,name:"constraints"},{anchor:"transformers.ConstrainedBeamSearchScorer.device",description:`<strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.`,name:"device"},{anchor:"transformers.ConstrainedBeamSearchScorer.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.`,name:"length_penalty"},{anchor:"transformers.ConstrainedBeamSearchScorer.do_early_stopping",description:`<strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"do_early_stopping"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beam_hyps_to_keep",description:`<strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>~transformer.BeamSearchScorer.finalize</code>.`,name:"num_beam_hyps_to_keep"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L390"}}),Wn=new v({props:{name:"process",anchor:"transformers.ConstrainedBeamSearchScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"scores_for_all_vocab",val:": FloatTensor"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.ConstrainedBeamSearchScorer.process.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_scores",description:`<strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_scores"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_tokens",description:`<strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_tokens"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_indices",description:`<strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.`,name:"next_indices"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.scores_for_all_vocab",description:`<strong>scores_for_all_vocab</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; The scores of all tokens in the vocabulary for each of the beam hypotheses.`,name:"scores_for_all_vocab"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L486",returnDescription:` <p>A dictionary composed of the fields as defined above:</p> <ul> <li> <p><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Updated scores of all non-finished beams.</p> </li> <li> <p><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Next tokens to be added to the non-finished beam_hypotheses.</p> </li> <li> <p><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Beam indices indicating to which beam the next tokens shall be added.</p> </li> </ul> `,returnType:` <p><code>UserDict</code></p> `}}),Nn=new v({props:{name:"finalize",anchor:"transformers.ConstrainedBeamSearchScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"final_beam_scores",val:": FloatTensor"},{name:"final_beam_tokens",val:": LongTensor"},{name:"final_beam_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = 
None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L768"}}),Vn=new Ae({}),Mn=new v({props:{name:"transformers.top_k_top_p_filtering",anchor:"transformers.top_k_top_p_filtering",parameters:[{name:"logits",val:": FloatTensor"},{name:"top_k",val:": int = 0"},{name:"top_p",val:": float = 1.0"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],parametersDescription:[{anchor:"transformers.top_k_top_p_filtering.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)`,name:"top_k"},{anchor:"transformers.top_k_top_p_filtering.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)`,name:"top_p"},{anchor:"transformers.top_k_top_p_filtering.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L3416"}}),jn=new v({props:{name:"transformers.tf_top_k_top_p_filtering",anchor:"transformers.tf_top_k_top_p_filtering",parameters:[{name:"logits",val:""},{name:"top_k",val:" = 0"},{name:"top_p",val:" = 1.0"},{name:"filter_value",val:" = -inf"},{name:"min_tokens_to_keep",val:" = 1"}],parametersDescription:[{anchor:"transformers.tf_top_k_top_p_filtering.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)`,name:"top_k"},{anchor:"transformers.tf_top_k_top_p_filtering.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. 
(<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)`,name:"top_p"},{anchor:"transformers.tf_top_k_top_p_filtering.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.`,name:"min_tokens_to_keep"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L3207"}}),{c(){w=o("meta"),Re=c(),k=o("h1"),Ce=o("a"),mc=o("span"),f(at.$$.fragment),Xm=c(),fc=o("span"),Jm=a("Utilities for Generation"),xd=c(),y=o("p"),Qm=a("This page lists all the utility functions used by "),Un=o("a"),Zm=a("generate()"),ef=a(`, `),Yn=o("a"),rf=a("greedy_search()"),tf=a(`, `),Xn=o("a"),of=a("sample()"),nf=a(`, `),Jn=o("a"),sf=a("beam_search()"),af=a(`, `),Qn=o("a"),cf=a("beam_sample()"),lf=a(`, `),Zn=o("a"),df=a("group_beam_search()"),pf=a(`, and `),es=o("a"),mf=a("constrained_beam_search()"),ff=a("."),wd=c(),rs=o("p"),gf=a("Most of those are only useful if you are studying the code of the generate methods in the library."),Ld=c(),Ke=o("h2"),_r=o("a"),gc=o("span"),f(it.$$.fragment),uf=c(),uc=o("span"),hf=a("Generate Outputs"),Ed=c(),V=o("p"),_f=a("The output of "),ts=o("a"),bf=a("generate()"),vf=a(` is an instance of a subclass of `),os=o("a"),$f=a("ModelOutput"),Tf=a(`. This output is a data structure containing all the information returned by `),ns=o("a"),yf=a("generate()"),kf=a(", but that can also be used as tuple or dictionary."),Pd=c(),ss=o("p"),xf=a("Here\u2019s an example:"),Fd=c(),f(ct.$$.fragment),Dd=c(),We=o("p"),wf=a("The "),hc=o("code"),Lf=a("generation_output"),Ef=a(" object is a "),as=o("a"),Pf=a("GreedySearchDecoderOnlyOutput"),Ff=a(`, as we can see in the documentation of that class below, it means it has the following attributes:`),Sd=c(),M=o("ul"),is=o("li"),_c=o("code"),Df=a("sequences"),Sf=a(": the generated sequences of tokens"),zf=c(),cs=o("li"),bc=o("code"),Of=a("scores"),qf=a(" (optional): the prediction scores of the language modelling head, for each generation step"),Bf=c(),ls=o("li"),vc=o("code"),If=a("hidden_states"),Af=a(" (optional): the hidden states of the model, for each generation step"),Cf=c(),ds=o("li"),$c=o("code"),Wf=a("attentions"),Nf=a(" (optional): the attention weights of the model, for each generation step"),zd=c(),x=o("p"),Vf=a("Here we have the "),Tc=o("code"),Mf=a("scores"),Gf=a(" since we passed along "),yc=o("code"),jf=a("output_scores=True"),Hf=a(", but we don\u2019t have "),kc=o("code"),Rf=a("hidden_states"),Kf=a(` and `),xc=o("code"),Uf=a("attentions"),Yf=a(" because we didn\u2019t pass "),wc=o("code"),Xf=a("output_hidden_states=True"),Jf=a(" or "),Lc=o("code"),Qf=a("output_attentions=True"),Zf=a("."),Od=c(),S=o("p"),eg=a(`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),Ec=o("code"),rg=a("None"),tg=a(". Here for instance "),Pc=o("code"),og=a("generation_output.scores"),ng=a(` are all the generated prediction scores of the language modeling head, and `),Fc=o("code"),sg=a("generation_output.attentions"),ag=a(" is "),Dc=o("code"),ig=a("None"),cg=a("."),qd=c(),z=o("p"),lg=a("When using our "),Sc=o("code"),dg=a("generation_output"),pg=a(" object as a tuple, it only keeps the attributes that don\u2019t have "),zc=o("code"),mg=a("None"),fg=a(` values. 
Here, for instance, it has two elements, `),Oc=o("code"),gg=a("loss"),ug=a(" then "),qc=o("code"),hg=a("logits"),_g=a(", so"),Bd=c(),f(lt.$$.fragment),Id=c(),br=o("p"),bg=a("will return the tuple "),Bc=o("code"),vg=a("(generation_output.sequences, generation_output.scores)"),$g=a(" for instance."),Ad=c(),O=o("p"),Tg=a("When using our "),Ic=o("code"),yg=a("generation_output"),kg=a(" object as a dictionary, it only keeps the attributes that don\u2019t have "),Ac=o("code"),xg=a("None"),wg=a(` values. Here, for instance, it has two keys that are `),Cc=o("code"),Lg=a("sequences"),Eg=a(" and "),Wc=o("code"),Pg=a("scores"),Fg=a("."),Cd=c(),ps=o("p"),Dg=a("We document here all output types."),Wd=c(),Ue=o("h3"),vr=o("a"),Nc=o("span"),f(dt.$$.fragment),Sg=c(),Vc=o("span"),zg=a("GreedySearchOutput"),Nd=c(),Ye=o("div"),f(pt.$$.fragment),Og=c(),Mc=o("p"),qg=a("Base class for outputs of decoder-only generation models using greedy search."),Vd=c(),Xe=o("div"),f(mt.$$.fragment),Bg=c(),Gc=o("p"),Ig=a(`Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),Md=c(),R=o("div"),f(ft.$$.fragment),Ag=c(),jc=o("p"),Cg=a("Flax Base class for outputs of decoder-only generation models using greedy search."),Wg=c(),$r=o("div"),f(gt.$$.fragment),Ng=c(),Hc=o("p"),Vg=a("\u201CReturns a new object replacing the specified fields with new values."),Gd=c(),Je=o("h3"),Tr=o("a"),Rc=o("span"),f(ut.$$.fragment),Mg=c(),Kc=o("span"),Gg=a("SampleOutput"),jd=c(),Qe=o("div"),f(ht.$$.fragment),jg=c(),Uc=o("p"),Hg=a("Base class for outputs of decoder-only generation models using sampling."),Hd=c(),Ze=o("div"),f(_t.$$.fragment),Rg=c(),Yc=o("p"),Kg=a(`Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),Rd=c(),K=o("div"),f(bt.$$.fragment),Ug=c(),Xc=o("p"),Yg=a("Flax Base class for outputs of decoder-only generation models using sampling."),Xg=c(),yr=o("div"),f(vt.$$.fragment),Jg=c(),Jc=o("p"),Qg=a("\u201CReturns a new object replacing the specified fields with new values."),Kd=c(),er=o("h3"),kr=o("a"),Qc=o("span"),f($t.$$.fragment),Zg=c(),Zc=o("span"),eu=a("BeamSearchOutput"),Ud=c(),rr=o("div"),f(Tt.$$.fragment),ru=c(),el=o("p"),tu=a("Base class for outputs of decoder-only generation models using beam search."),Yd=c(),tr=o("div"),f(yt.$$.fragment),ou=c(),rl=o("p"),nu=a(`Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),Xd=c(),or=o("h3"),xr=o("a"),tl=o("span"),f(kt.$$.fragment),su=c(),ol=o("span"),au=a("BeamSampleOutput"),Jd=c(),nr=o("div"),f(xt.$$.fragment),iu=c(),nl=o("p"),cu=a("Base class for outputs of decoder-only generation models using beam sample."),Qd=c(),sr=o("div"),f(wt.$$.fragment),lu=c(),sl=o("p"),du=a(`Base class for outputs of encoder-decoder generation models using beam sampling. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),Zd=c(),ar=o("h2"),wr=o("a"),al=o("span"),f(Lt.$$.fragment),pu=c(),il=o("span"),mu=a("LogitsProcessor"),ep=c(),Lr=o("p"),fu=a("A "),ms=o("a"),gu=a("LogitsProcessor"),uu=a(` can be used to modify the prediction scores of a language model head for generation.`),rp=c(),U=o("div"),f(Et.$$.fragment),hu=c(),cl=o("p"),_u=a("Abstract base class for all logit processors that can be applied during generation."),bu=c(),Er=o("div"),f(Pt.$$.fragment),vu=c(),ll=o("p"),$u=a("Torch method for processing logits."),tp=c(),Y=o("div"),f(Ft.$$.fragment),Tu=c(),L=o("p"),yu=a("This class can be used to create a list of "),fs=o("a"),ku=a("LogitsProcessor"),xu=a(" or "),gs=o("a"),wu=a("LogitsWarper"),Lu=a(` to subsequently process a `),dl=o("code"),Eu=a("scores"),Pu=a(" input tensor. This class inherits from list and adds a specific "),pl=o("em"),ml=o("strong"),Fu=a("call"),Du=a(` method to apply each `),us=o("a"),Su=a("LogitsProcessor"),zu=a(" or "),hs=o("a"),Ou=a("LogitsWarper"),qu=a(" to the inputs."),Bu=c(),_s=o("div"),f(Dt.$$.fragment),op=c(),X=o("div"),f(St.$$.fragment),Iu=c(),fl=o("p"),Au=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),Cu=c(),Pr=o("div"),f(zt.$$.fragment),Wu=c(),gl=o("p"),Nu=a("Torch method for warping logits."),np=c(),J=o("div"),f(Ot.$$.fragment),Vu=c(),bs=o("p"),vs=o("a"),Mu=a("LogitsProcessor"),Gu=a(" enforcing a min-length by setting EOS probability to 0."),ju=c(),$s=o("div"),f(qt.$$.fragment),sp=c(),Q=o("div"),f(Bt.$$.fragment),Hu=c(),Ts=o("p"),ys=o("a"),Ru=a("LogitsWarper"),Ku=a(" for temperature (exponential scaling output probability distribution)."),Uu=c(),ks=o("div"),f(It.$$.fragment),ap=c(),Z=o("div"),f(At.$$.fragment),Yu=c(),xs=o("p"),ws=o("a"),Xu=a("LogitsProcessor"),Ju=a(" enforcing an exponential penalty on repeated sequences."),Qu=c(),Ls=o("div"),f(Ct.$$.fragment),ip=c(),ee=o("div"),f(Wt.$$.fragment),Zu=c(),Es=o("p"),Ps=o("a"),eh=a("LogitsWarper"),rh=a(" that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),th=c(),Fs=o("div"),f(Nt.$$.fragment),cp=c(),re=o("div"),f(Vt.$$.fragment),oh=c(),Ds=o("p"),Ss=o("a"),nh=a("LogitsWarper"),sh=a(" that performs top-k, i.e. restricting to the k highest probability elements."),ah=c(),zs=o("div"),f(Mt.$$.fragment),lp=c(),te=o("div"),f(Gt.$$.fragment),ih=c(),Fr=o("p"),Os=o("a"),ch=a("LogitsWarper"),lh=a(" that performs typical decoding. See "),jt=o("a"),dh=a(`Typical Decoding for Natural Language Generation`),ph=a(" for more information."),mh=c(),qs=o("div"),f(Ht.$$.fragment),dp=c(),oe=o("div"),f(Rt.$$.fragment),fh=c(),Dr=o("p"),Bs=o("a"),gh=a("LogitsProcessor"),uh=a(` that enforces no repetition of n-grams. See `),Kt=o("a"),hh=a("Fairseq"),_h=a("."),bh=c(),Is=o("div"),f(Ut.$$.fragment),pp=c(),ne=o("div"),f(Yt.$$.fragment),vh=c(),As=o("p"),Cs=o("a"),$h=a("LogitsProcessor"),Th=a(" that enforces that specified sequences will never be sampled."),yh=c(),Ws=o("div"),f(Xt.$$.fragment),mp=c(),se=o("div"),f(Jt.$$.fragment),kh=c(),Sr=o("p"),Ns=o("a"),xh=a("LogitsProcessor"),wh=a(` that enforces constrained generation and is useful for prefix-conditioned constrained generation. 
See `),Qt=o("a"),Lh=a("Autoregressive Entity Retrieval"),Eh=a(" for more information."),Ph=c(),Vs=o("div"),f(Zt.$$.fragment),fp=c(),ae=o("div"),f(eo.$$.fragment),Fh=c(),Ne=o("p"),Ms=o("a"),Dh=a("LogitsProcessor"),Sh=a(` that enforces diverse beam search. Note that this logits processor is only effective for `),Gs=o("a"),zh=a("PreTrainedModel.group_beam_search()"),Oh=a(". See "),ro=o("a"),qh=a(`Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models`),Bh=a(" for more details."),Ih=c(),js=o("div"),f(to.$$.fragment),gp=c(),ie=o("div"),f(oo.$$.fragment),Ah=c(),Hs=o("p"),Rs=o("a"),Ch=a("LogitsProcessor"),Wh=a(" that enforces the specified token as the first generated token."),Nh=c(),Ks=o("div"),f(no.$$.fragment),up=c(),ce=o("div"),f(so.$$.fragment),Vh=c(),zr=o("p"),Us=o("a"),Mh=a("LogitsProcessor"),Gh=a(" that enforces the specified token as the last generated token when "),ul=o("code"),jh=a("max_length"),Hh=a(" is reached."),Rh=c(),Ys=o("div"),f(ao.$$.fragment),hp=c(),le=o("div"),f(io.$$.fragment),Kh=c(),G=o("p"),Xs=o("a"),Uh=a("LogitsProcessor"),Yh=a(" that removes all "),hl=o("code"),Xh=a("nan"),Jh=a(" and "),_l=o("code"),Qh=a("inf"),Zh=a(` values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. `),bl=o("code"),e_=a("max_length"),r_=a(` is reached.`),t_=c(),Js=o("div"),f(co.$$.fragment),_p=c(),de=o("div"),f(lo.$$.fragment),o_=c(),vl=o("p"),n_=a("Abstract base class for all logit processors that can be applied during generation."),s_=c(),Or=o("div"),f(po.$$.fragment),a_=c(),$l=o("p"),i_=a("TF method for processing logits."),bp=c(),pe=o("div"),f(mo.$$.fragment),c_=c(),A=o("p"),l_=a("This class can be used to create a list of "),Qs=o("a"),d_=a("TFLogitsProcessor"),p_=a(" to subsequently process a "),Tl=o("code"),m_=a("scores"),f_=a(` input tensor. This class inherits from list and adds a specific `),yl=o("em"),kl=o("strong"),g_=a("call"),u_=a(" method to apply each "),Zs=o("a"),h_=a("TFLogitsProcessor"),__=a(` to the inputs.`),b_=c(),ea=o("div"),f(fo.$$.fragment),vp=c(),me=o("div"),f(go.$$.fragment),v_=c(),xl=o("p"),$_=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),T_=c(),qr=o("div"),f(uo.$$.fragment),y_=c(),wl=o("p"),k_=a("TF method for warping logits."),$p=c(),fe=o("div"),f(ho.$$.fragment),x_=c(),ra=o("p"),ta=o("a"),w_=a("TFLogitsWarper"),L_=a(" for temperature (exponential scaling output probability distribution)."),E_=c(),oa=o("div"),f(_o.$$.fragment),Tp=c(),ge=o("div"),f(bo.$$.fragment),P_=c(),na=o("p"),sa=o("a"),F_=a("TFLogitsWarper"),D_=a(" that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off."),S_=c(),aa=o("div"),f(vo.$$.fragment),yp=c(),ue=o("div"),f($o.$$.fragment),z_=c(),ia=o("p"),ca=o("a"),O_=a("TFLogitsWarper"),q_=a(" that performs top-k, i.e. restricting to the k highest probability elements."),B_=c(),la=o("div"),f(To.$$.fragment),kp=c(),he=o("div"),f(yo.$$.fragment),I_=c(),da=o("p"),pa=o("a"),A_=a("TFLogitsProcessor"),C_=a(" enforcing a min-length by setting EOS probability to 0."),W_=c(),ma=o("div"),f(ko.$$.fragment),xp=c(),_e=o("div"),f(xo.$$.fragment),N_=c(),fa=o("p"),ga=o("a"),V_=a("TFLogitsProcessor"),M_=a(" that enforces that specified sequences will never be sampled."),G_=c(),ua=o("div"),f(wo.$$.fragment),wp=c(),be=o("div"),f(Lo.$$.fragment),j_=c(),Br=o("p"),ha=o("a"),H_=a("TFLogitsProcessor"),R_=a(` that enforces no repetition of n-grams. 
See `),Eo=o("a"),K_=a("Fairseq"),U_=a("."),Y_=c(),_a=o("div"),f(Po.$$.fragment),Lp=c(),ve=o("div"),f(Fo.$$.fragment),X_=c(),ba=o("p"),va=o("a"),J_=a("TFLogitsProcessor"),Q_=a(" enforcing an exponential penalty on repeated sequences."),Z_=c(),$a=o("div"),f(Do.$$.fragment),Ep=c(),$e=o("div"),f(So.$$.fragment),eb=c(),Ta=o("p"),ya=o("a"),rb=a("TFLogitsProcessor"),tb=a(" that enforces the specified token as the first generated token."),ob=c(),ka=o("div"),f(zo.$$.fragment),Pp=c(),Te=o("div"),f(Oo.$$.fragment),nb=c(),Ir=o("p"),xa=o("a"),sb=a("TFLogitsProcessor"),ab=a(" that enforces the specified token as the last generated token when "),Ll=o("code"),ib=a("max_length"),cb=a(" is reached."),lb=c(),wa=o("div"),f(qo.$$.fragment),Fp=c(),ye=o("div"),f(Bo.$$.fragment),db=c(),El=o("p"),pb=a("Abstract base class for all logit processors that can be applied during generation."),mb=c(),Ar=o("div"),f(Io.$$.fragment),fb=c(),Pl=o("p"),gb=a("Flax method for processing logits."),Dp=c(),ke=o("div"),f(Ao.$$.fragment),ub=c(),E=o("p"),hb=a("This class can be used to create a list of "),La=o("a"),_b=a("FlaxLogitsProcessor"),bb=a(" or "),Ea=o("a"),vb=a("FlaxLogitsWarper"),$b=a(` to subsequently process a `),Fl=o("code"),Tb=a("scores"),yb=a(" input tensor. This class inherits from list and adds a specific "),Dl=o("em"),Sl=o("strong"),kb=a("call"),xb=a(` method to apply each `),Pa=o("a"),wb=a("FlaxLogitsProcessor"),Lb=a(" or "),Fa=o("a"),Eb=a("FlaxLogitsWarper"),Pb=a(" to the inputs."),Fb=c(),Da=o("div"),f(Co.$$.fragment),Sp=c(),xe=o("div"),f(Wo.$$.fragment),Db=c(),zl=o("p"),Sb=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),zb=c(),Cr=o("div"),f(No.$$.fragment),Ob=c(),Ol=o("p"),qb=a("Flax method for warping logits."),zp=c(),we=o("div"),f(Vo.$$.fragment),Bb=c(),Sa=o("p"),za=o("a"),Ib=a("FlaxLogitsWarper"),Ab=a(" for temperature (exponential scaling output probability distribution)."),Cb=c(),Oa=o("div"),f(Mo.$$.fragment),Op=c(),Le=o("div"),f(Go.$$.fragment),Wb=c(),qa=o("p"),Ba=o("a"),Nb=a("FlaxLogitsWarper"),Vb=a(" that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),Mb=c(),Ia=o("div"),f(jo.$$.fragment),qp=c(),Ee=o("div"),f(Ho.$$.fragment),Gb=c(),Aa=o("p"),Ca=o("a"),jb=a("FlaxLogitsWarper"),Hb=a(" that performs top-k, i.e. 
restricting to the k highest probability elements."),Rb=c(),Wa=o("div"),f(Ro.$$.fragment),Bp=c(),Pe=o("div"),f(Ko.$$.fragment),Kb=c(),Na=o("p"),Va=o("a"),Ub=a("FlaxLogitsProcessor"),Yb=a(" that enforces the specified token as the first generated token."),Xb=c(),Ma=o("div"),f(Uo.$$.fragment),Ip=c(),Fe=o("div"),f(Yo.$$.fragment),Jb=c(),Wr=o("p"),Ga=o("a"),Qb=a("FlaxLogitsProcessor"),Zb=a(" that enforces the specified token as the last generated token when "),ql=o("code"),ev=a("max_length"),rv=a(" is reached."),tv=c(),ja=o("div"),f(Xo.$$.fragment),Ap=c(),De=o("div"),f(Jo.$$.fragment),ov=c(),Ha=o("p"),Ra=o("a"),nv=a("FlaxLogitsProcessor"),sv=a(" enforcing a min-length by setting EOS probability to 0."),av=c(),Ka=o("div"),f(Qo.$$.fragment),Cp=c(),ir=o("h2"),Nr=o("a"),Bl=o("span"),f(Zo.$$.fragment),iv=c(),Il=o("span"),cv=a("StoppingCriteria"),Wp=c(),Vr=o("p"),lv=a("A "),Ua=o("a"),dv=a("StoppingCriteria"),pv=a(" can be used to change when to stop generation (other than EOS token)."),Np=c(),Se=o("div"),f(en.$$.fragment),mv=c(),Al=o("p"),fv=a("Abstract base class for all stopping criteria that can be applied during generation."),gv=c(),Ya=o("div"),f(rn.$$.fragment),Vp=c(),cr=o("div"),f(tn.$$.fragment),uv=c(),Xa=o("div"),f(on.$$.fragment),Mp=c(),ze=o("div"),f(nn.$$.fragment),hv=c(),sn=o("p"),_v=a("This class can be used to stop generation whenever the full generated number of tokens exceeds "),Cl=o("code"),bv=a("max_length"),vv=a(`. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.`),$v=c(),Ja=o("div"),f(an.$$.fragment),Gp=c(),Oe=o("div"),f(cn.$$.fragment),Tv=c(),ln=o("p"),yv=a(`This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. You can override this by passing an `),Wl=o("code"),kv=a("initial_time"),xv=a("."),wv=c(),Qa=o("div"),f(dn.$$.fragment),jp=c(),lr=o("h2"),Mr=o("a"),Nl=o("span"),f(pn.$$.fragment),Lv=c(),Vl=o("span"),Ev=a("Constraints"),Hp=c(),Gr=o("p"),Pv=a("A "),Za=o("a"),Fv=a("Constraint"),Dv=a(" can be used to force the generation to include specific tokens or sequences in the output."),Rp=c(),$=o("div"),f(mn.$$.fragment),Sv=c(),Ml=o("p"),zv=a(`Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.`),Ov=c(),Gl=o("p"),qv=a("All classes that inherit Constraint must follow the requirement that"),Bv=c(),f(jr.$$.fragment),Iv=c(),jl=o("p"),Av=a("will always terminate (halt)."),Cv=c(),Hr=o("div"),f(fn.$$.fragment),Wv=c(),Hl=o("p"),Nv=a("When called, returns the token that would take this constraint one step closer to being fulfilled."),Vv=c(),Rr=o("div"),f(gn.$$.fragment),Mv=c(),Rl=o("p"),Gv=a("Creates a new instance of this constraint."),jv=c(),Kr=o("div"),f(un.$$.fragment),Hv=c(),Kl=o("p"),Rv=a("Reads in a token and returns whether it creates progress."),Kv=c(),Ur=o("div"),f(hn.$$.fragment),Uv=c(),_n=o("p"),Yv=a("Returns the number of remaining steps of "),Ul=o("code"),Xv=a("advance()"),Jv=a(" in order to complete this constraint."),Qv=c(),Yr=o("div"),f(bn.$$.fragment),Zv=c(),Yl=o("p"),e1=a(`Resets the state of this constraint to its initialization. 
We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token.`),r1=c(),Xr=o("div"),f(vn.$$.fragment),t1=c(),Xl=o("p"),o1=a("Tests whether this constraint has been properly defined."),n1=c(),Ve=o("div"),f($n.$$.fragment),s1=c(),Tn=o("p"),a1=a(`Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `),Jl=o("code"),i1=a("does_advance(self, token_id: int)"),c1=a("."),l1=c(),Ql=o("p"),d1=a(`This isn\u2019t to test whether a certain token will advance the progress; it\u2019s to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint)`),Kp=c(),dr=o("div"),f(yn.$$.fragment),p1=c(),ei=o("p"),ri=o("a"),m1=a("Constraint"),f1=a(" enforcing that an ordered sequence of tokens is included in the output."),Up=c(),pr=o("div"),f(kn.$$.fragment),g1=c(),xn=o("p"),u1=a("A special "),ti=o("a"),h1=a("Constraint"),_1=a(" that is fulfilled by fulfilling just one of several constraints."),Yp=c(),C=o("div"),f(wn.$$.fragment),b1=c(),Zl=o("p"),v1=a("A class for beam scorers to track its progress through a list of constraints."),$1=c(),F=o("div"),f(Ln.$$.fragment),T1=c(),ed=o("p"),y1=a(`The list of tokens to generate such that we can make progress. By \u201Clist\u201D we don\u2019t mean the list of token that will fully fulfill a constraint.`),k1=c(),mr=o("p"),x1=a("Given constraints "),rd=o("code"),w1=a("c_i = {t_ij | j == # of tokens}"),L1=a(`, If we\u2019re not in the middle of progressing through a specific constraint `),td=o("code"),E1=a("c_i"),P1=a(", we return:"),F1=c(),od=o("p"),nd=o("code"),D1=a("[t_k1 for k in indices of unfulfilled constraints]"),S1=c(),qe=o("p"),z1=a(`If we are in the middle of a constraint, then we return: `),sd=o("code"),O1=a("[t_ij]"),q1=a(", where "),ad=o("code"),B1=a("i"),I1=a(" is the index of the inprogress constraint, "),id=o("code"),A1=a("j"),C1=a(" is the next step for the constraint."),W1=c(),cd=o("p"),N1=a(`Though we don\u2019t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that\u2019s the only one we\u2019ll return.`),V1=c(),Jr=o("div"),f(En.$$.fragment),M1=c(),ld=o("p"),G1=a("token_ids: the tokens generated thus far to reset the state of the progress through constraints."),Xp=c(),fr=o("h2"),Qr=o("a"),dd=o("span"),f(Pn.$$.fragment),j1=c(),pd=o("span"),H1=a("BeamSearch"),Jp=c(),W=o("div"),f(Fn.$$.fragment),R1=c(),gr=o("p"),K1=a("Abstract base class for all beam scorers that are used for "),oi=o("a"),U1=a("beam_search()"),Y1=a(` and `),ni=o("a"),X1=a("beam_sample()"),J1=a("."),Q1=c(),si=o("div"),f(Dn.$$.fragment),Z1=c(),ai=o("div"),f(Sn.$$.fragment),Qp=c(),P=o("div"),f(zn.$$.fragment),e2=c(),ii=o("p"),ci=o("a"),r2=a("BeamScorer"),t2=a(" implementing standard beam search decoding."),o2=c(),On=o("p"),n2=a("Adapted in part from "),qn=o("a"),s2=a(`Facebook\u2019s XLM beam search code`),a2=a("."),i2=c(),li=o("p"),c2=a("Reference for the diverse beam search algorithm and implementation "),Bn=o("a"),l2=a(`Ashwin Kalyan\u2019s DBS implementation`),d2=c(),di=o("div"),f(In.$$.fragment),p2=c(),pi=o("div"),f(An.$$.fragment),Zp=c(),N=o("div"),f(Cn.$$.fragment),m2=c(),mi=o("p"),fi=o("a"),f2=a("BeamScorer"),g2=a(" implementing constrained beam search 
decoding."),u2=c(),gi=o("div"),f(Wn.$$.fragment),h2=c(),ui=o("div"),f(Nn.$$.fragment),em=c(),ur=o("h2"),Zr=o("a"),md=o("span"),f(Vn.$$.fragment),_2=c(),fd=o("span"),b2=a("Utilities"),rm=c(),Be=o("div"),f(Mn.$$.fragment),v2=c(),gd=o("p"),$2=a("Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),T2=c(),hi=o("p"),y2=a("From: "),Gn=o("a"),k2=a("https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),tm=c(),Ie=o("div"),f(jn.$$.fragment),x2=c(),ud=o("p"),w2=a("Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),L2=c(),_i=o("p"),E2=a("From: "),Hn=o("a"),P2=a("https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),this.h()},l(e){const p=E0('[data-svelte="svelte-1phssyn"]',document.head);w=n(p,"META",{name:!0,content:!0}),p.forEach(t),Re=l(e),k=n(e,"H1",{class:!0});var Rn=s(k);Ce=n(Rn,"A",{id:!0,class:!0,href:!0});var s$=s(Ce);mc=n(s$,"SPAN",{});var a$=s(mc);g(at.$$.fragment,a$),a$.forEach(t),s$.forEach(t),Xm=l(Rn),fc=n(Rn,"SPAN",{});var i$=s(fc);Jm=i(i$,"Utilities for Generation"),i$.forEach(t),Rn.forEach(t),xd=l(e),y=n(e,"P",{});var D=s(y);Qm=i(D,"This page lists all the utility functions used by "),Un=n(D,"A",{href:!0});var c$=s(Un);Zm=i(c$,"generate()"),c$.forEach(t),ef=i(D,`, `),Yn=n(D,"A",{href:!0});var l$=s(Yn);rf=i(l$,"greedy_search()"),l$.forEach(t),tf=i(D,`, `),Xn=n(D,"A",{href:!0});var d$=s(Xn);of=i(d$,"sample()"),d$.forEach(t),nf=i(D,`, `),Jn=n(D,"A",{href:!0});var p$=s(Jn);sf=i(p$,"beam_search()"),p$.forEach(t),af=i(D,`, `),Qn=n(D,"A",{href:!0});var m$=s(Qn);cf=i(m$,"beam_sample()"),m$.forEach(t),lf=i(D,`, `),Zn=n(D,"A",{href:!0});var f$=s(Zn);df=i(f$,"group_beam_search()"),f$.forEach(t),pf=i(D,`, and `),es=n(D,"A",{href:!0});var g$=s(es);mf=i(g$,"constrained_beam_search()"),g$.forEach(t),ff=i(D,"."),D.forEach(t),wd=l(e),rs=n(e,"P",{});var u$=s(rs);gf=i(u$,"Most of those are only useful if you are studying the code of the generate methods in the library."),u$.forEach(t),Ld=l(e),Ke=n(e,"H2",{class:!0});var nm=s(Ke);_r=n(nm,"A",{id:!0,class:!0,href:!0});var h$=s(_r);gc=n(h$,"SPAN",{});var _$=s(gc);g(it.$$.fragment,_$),_$.forEach(t),h$.forEach(t),uf=l(nm),uc=n(nm,"SPAN",{});var b$=s(uc);hf=i(b$,"Generate Outputs"),b$.forEach(t),nm.forEach(t),Ed=l(e),V=n(e,"P",{});var et=s(V);_f=i(et,"The output of "),ts=n(et,"A",{href:!0});var v$=s(ts);bf=i(v$,"generate()"),v$.forEach(t),vf=i(et,` is an instance of a subclass of `),os=n(et,"A",{href:!0});var $$=s(os);$f=i($$,"ModelOutput"),$$.forEach(t),Tf=i(et,`. 
This output is a data structure containing all the information returned by `),ns=n(et,"A",{href:!0});var T$=s(ns);yf=i(T$,"generate()"),T$.forEach(t),kf=i(et,", but that can also be used as tuple or dictionary."),et.forEach(t),Pd=l(e),ss=n(e,"P",{});var y$=s(ss);xf=i(y$,"Here\u2019s an example:"),y$.forEach(t),Fd=l(e),g(ct.$$.fragment,e),Dd=l(e),We=n(e,"P",{});var bi=s(We);wf=i(bi,"The "),hc=n(bi,"CODE",{});var k$=s(hc);Lf=i(k$,"generation_output"),k$.forEach(t),Ef=i(bi," object is a "),as=n(bi,"A",{href:!0});var x$=s(as);Pf=i(x$,"GreedySearchDecoderOnlyOutput"),x$.forEach(t),Ff=i(bi,`, as we can see in the documentation of that class below, it means it has the following attributes:`),bi.forEach(t),Sd=l(e),M=n(e,"UL",{});var rt=s(M);is=n(rt,"LI",{});var F2=s(is);_c=n(F2,"CODE",{});var w$=s(_c);Df=i(w$,"sequences"),w$.forEach(t),Sf=i(F2,": the generated sequences of tokens"),F2.forEach(t),zf=l(rt),cs=n(rt,"LI",{});var D2=s(cs);bc=n(D2,"CODE",{});var L$=s(bc);Of=i(L$,"scores"),L$.forEach(t),qf=i(D2," (optional): the prediction scores of the language modelling head, for each generation step"),D2.forEach(t),Bf=l(rt),ls=n(rt,"LI",{});var S2=s(ls);vc=n(S2,"CODE",{});var E$=s(vc);If=i(E$,"hidden_states"),E$.forEach(t),Af=i(S2," (optional): the hidden states of the model, for each generation step"),S2.forEach(t),Cf=l(rt),ds=n(rt,"LI",{});var z2=s(ds);$c=n(z2,"CODE",{});var P$=s($c);Wf=i(P$,"attentions"),P$.forEach(t),Nf=i(z2," (optional): the attention weights of the model, for each generation step"),z2.forEach(t),rt.forEach(t),zd=l(e),x=n(e,"P",{});var q=s(x);Vf=i(q,"Here we have the "),Tc=n(q,"CODE",{});var F$=s(Tc);Mf=i(F$,"scores"),F$.forEach(t),Gf=i(q," since we passed along "),yc=n(q,"CODE",{});var D$=s(yc);jf=i(D$,"output_scores=True"),D$.forEach(t),Hf=i(q,", but we don\u2019t have "),kc=n(q,"CODE",{});var S$=s(kc);Rf=i(S$,"hidden_states"),S$.forEach(t),Kf=i(q,` and `),xc=n(q,"CODE",{});var z$=s(xc);Uf=i(z$,"attentions"),z$.forEach(t),Yf=i(q," because we didn\u2019t pass "),wc=n(q,"CODE",{});var O$=s(wc);Xf=i(O$,"output_hidden_states=True"),O$.forEach(t),Jf=i(q," or "),Lc=n(q,"CODE",{});var q$=s(Lc);Qf=i(q$,"output_attentions=True"),q$.forEach(t),Zf=i(q,"."),q.forEach(t),Od=l(e),S=n(e,"P",{});var Me=s(S);eg=i(Me,`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),Ec=n(Me,"CODE",{});var B$=s(Ec);rg=i(B$,"None"),B$.forEach(t),tg=i(Me,". Here for instance "),Pc=n(Me,"CODE",{});var I$=s(Pc);og=i(I$,"generation_output.scores"),I$.forEach(t),ng=i(Me,` are all the generated prediction scores of the language modeling head, and `),Fc=n(Me,"CODE",{});var A$=s(Fc);sg=i(A$,"generation_output.attentions"),A$.forEach(t),ag=i(Me," is "),Dc=n(Me,"CODE",{});var C$=s(Dc);ig=i(C$,"None"),C$.forEach(t),cg=i(Me,"."),Me.forEach(t),qd=l(e),z=n(e,"P",{});var Ge=s(z);lg=i(Ge,"When using our "),Sc=n(Ge,"CODE",{});var W$=s(Sc);dg=i(W$,"generation_output"),W$.forEach(t),pg=i(Ge," object as a tuple, it only keeps the attributes that don\u2019t have "),zc=n(Ge,"CODE",{});var N$=s(zc);mg=i(N$,"None"),N$.forEach(t),fg=i(Ge,` values. 
Here, for instance, it has two elements, `),Oc=n(Ge,"CODE",{});var V$=s(Oc);gg=i(V$,"loss"),V$.forEach(t),ug=i(Ge," then "),qc=n(Ge,"CODE",{});var M$=s(qc);hg=i(M$,"logits"),M$.forEach(t),_g=i(Ge,", so"),Ge.forEach(t),Bd=l(e),g(lt.$$.fragment,e),Id=l(e),br=n(e,"P",{});var sm=s(br);bg=i(sm,"will return the tuple "),Bc=n(sm,"CODE",{});var G$=s(Bc);vg=i(G$,"(generation_output.sequences, generation_output.scores)"),G$.forEach(t),$g=i(sm," for instance."),sm.forEach(t),Ad=l(e),O=n(e,"P",{});var je=s(O);Tg=i(je,"When using our "),Ic=n(je,"CODE",{});var j$=s(Ic);yg=i(j$,"generation_output"),j$.forEach(t),kg=i(je," object as a dictionary, it only keeps the attributes that don\u2019t have "),Ac=n(je,"CODE",{});var H$=s(Ac);xg=i(H$,"None"),H$.forEach(t),wg=i(je,` values. Here, for instance, it has two keys that are `),Cc=n(je,"CODE",{});var R$=s(Cc);Lg=i(R$,"sequences"),R$.forEach(t),Eg=i(je," and "),Wc=n(je,"CODE",{});var K$=s(Wc);Pg=i(K$,"scores"),K$.forEach(t),Fg=i(je,"."),je.forEach(t),Cd=l(e),ps=n(e,"P",{});var U$=s(ps);Dg=i(U$,"We document here all output types."),U$.forEach(t),Wd=l(e),Ue=n(e,"H3",{class:!0});var am=s(Ue);vr=n(am,"A",{id:!0,class:!0,href:!0});var Y$=s(vr);Nc=n(Y$,"SPAN",{});var X$=s(Nc);g(dt.$$.fragment,X$),X$.forEach(t),Y$.forEach(t),Sg=l(am),Vc=n(am,"SPAN",{});var J$=s(Vc);zg=i(J$,"GreedySearchOutput"),J$.forEach(t),am.forEach(t),Nd=l(e),Ye=n(e,"DIV",{class:!0});var im=s(Ye);g(pt.$$.fragment,im),Og=l(im),Mc=n(im,"P",{});var Q$=s(Mc);qg=i(Q$,"Base class for outputs of decoder-only generation models using greedy search."),Q$.forEach(t),im.forEach(t),Vd=l(e),Xe=n(e,"DIV",{class:!0});var cm=s(Xe);g(mt.$$.fragment,cm),Bg=l(cm),Gc=n(cm,"P",{});var Z$=s(Gc);Ig=i(Z$,`Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),Z$.forEach(t),cm.forEach(t),Md=l(e),R=n(e,"DIV",{class:!0});var vi=s(R);g(ft.$$.fragment,vi),Ag=l(vi),jc=n(vi,"P",{});var eT=s(jc);Cg=i(eT,"Flax Base class for outputs of decoder-only generation models using greedy search."),eT.forEach(t),Wg=l(vi),$r=n(vi,"DIV",{class:!0});var lm=s($r);g(gt.$$.fragment,lm),Ng=l(lm),Hc=n(lm,"P",{});var rT=s(Hc);Vg=i(rT,"\u201CReturns a new object replacing the specified fields with new values."),rT.forEach(t),lm.forEach(t),vi.forEach(t),Gd=l(e),Je=n(e,"H3",{class:!0});var dm=s(Je);Tr=n(dm,"A",{id:!0,class:!0,href:!0});var tT=s(Tr);Rc=n(tT,"SPAN",{});var oT=s(Rc);g(ut.$$.fragment,oT),oT.forEach(t),tT.forEach(t),Mg=l(dm),Kc=n(dm,"SPAN",{});var nT=s(Kc);Gg=i(nT,"SampleOutput"),nT.forEach(t),dm.forEach(t),jd=l(e),Qe=n(e,"DIV",{class:!0});var pm=s(Qe);g(ht.$$.fragment,pm),jg=l(pm),Uc=n(pm,"P",{});var sT=s(Uc);Hg=i(sT,"Base class for outputs of decoder-only generation models using sampling."),sT.forEach(t),pm.forEach(t),Hd=l(e),Ze=n(e,"DIV",{class:!0});var mm=s(Ze);g(_t.$$.fragment,mm),Rg=l(mm),Yc=n(mm,"P",{});var aT=s(Yc);Kg=i(aT,`Base class for outputs of encoder-decoder generation models using sampling. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),aT.forEach(t),mm.forEach(t),Rd=l(e),K=n(e,"DIV",{class:!0});var $i=s(K);g(bt.$$.fragment,$i),Ug=l($i),Xc=n($i,"P",{});var iT=s(Xc);Yg=i(iT,"Flax Base class for outputs of decoder-only generation models using sampling."),iT.forEach(t),Xg=l($i),yr=n($i,"DIV",{class:!0});var fm=s(yr);g(vt.$$.fragment,fm),Jg=l(fm),Jc=n(fm,"P",{});var cT=s(Jc);Qg=i(cT,"\u201CReturns a new object replacing the specified fields with new values."),cT.forEach(t),fm.forEach(t),$i.forEach(t),Kd=l(e),er=n(e,"H3",{class:!0});var gm=s(er);kr=n(gm,"A",{id:!0,class:!0,href:!0});var lT=s(kr);Qc=n(lT,"SPAN",{});var dT=s(Qc);g($t.$$.fragment,dT),dT.forEach(t),lT.forEach(t),Zg=l(gm),Zc=n(gm,"SPAN",{});var pT=s(Zc);eu=i(pT,"BeamSearchOutput"),pT.forEach(t),gm.forEach(t),Ud=l(e),rr=n(e,"DIV",{class:!0});var um=s(rr);g(Tt.$$.fragment,um),ru=l(um),el=n(um,"P",{});var mT=s(el);tu=i(mT,"Base class for outputs of decoder-only generation models using beam search."),mT.forEach(t),um.forEach(t),Yd=l(e),tr=n(e,"DIV",{class:!0});var hm=s(tr);g(yt.$$.fragment,hm),ou=l(hm),rl=n(hm,"P",{});var fT=s(rl);nu=i(fT,`Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),fT.forEach(t),hm.forEach(t),Xd=l(e),or=n(e,"H3",{class:!0});var _m=s(or);xr=n(_m,"A",{id:!0,class:!0,href:!0});var gT=s(xr);tl=n(gT,"SPAN",{});var uT=s(tl);g(kt.$$.fragment,uT),uT.forEach(t),gT.forEach(t),su=l(_m),ol=n(_m,"SPAN",{});var hT=s(ol);au=i(hT,"BeamSampleOutput"),hT.forEach(t),_m.forEach(t),Jd=l(e),nr=n(e,"DIV",{class:!0});var bm=s(nr);g(xt.$$.fragment,bm),iu=l(bm),nl=n(bm,"P",{});var _T=s(nl);cu=i(_T,"Base class for outputs of decoder-only generation models using beam sample."),_T.forEach(t),bm.forEach(t),Qd=l(e),sr=n(e,"DIV",{class:!0});var vm=s(sr);g(wt.$$.fragment,vm),lu=l(vm),sl=n(vm,"P",{});var bT=s(sl);du=i(bT,`Base class for outputs of encoder-decoder generation models using beam sampling. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),bT.forEach(t),vm.forEach(t),Zd=l(e),ar=n(e,"H2",{class:!0});var $m=s(ar);wr=n($m,"A",{id:!0,class:!0,href:!0});var vT=s(wr);al=n(vT,"SPAN",{});var $T=s(al);g(Lt.$$.fragment,$T),$T.forEach(t),vT.forEach(t),pu=l($m),il=n($m,"SPAN",{});var TT=s(il);mu=i(TT,"LogitsProcessor"),TT.forEach(t),$m.forEach(t),ep=l(e),Lr=n(e,"P",{});var Tm=s(Lr);fu=i(Tm,"A "),ms=n(Tm,"A",{href:!0});var yT=s(ms);gu=i(yT,"LogitsProcessor"),yT.forEach(t),uu=i(Tm,` can be used to modify the prediction scores of a language model head for generation.`),Tm.forEach(t),rp=l(e),U=n(e,"DIV",{class:!0});var Ti=s(U);g(Et.$$.fragment,Ti),hu=l(Ti),cl=n(Ti,"P",{});var kT=s(cl);_u=i(kT,"Abstract base class for all logit processors that can be applied during generation."),kT.forEach(t),bu=l(Ti),Er=n(Ti,"DIV",{class:!0});var ym=s(Er);g(Pt.$$.fragment,ym),vu=l(ym),ll=n(ym,"P",{});var xT=s(ll);$u=i(xT,"Torch method for processing logits."),xT.forEach(t),ym.forEach(t),Ti.forEach(t),tp=l(e),Y=n(e,"DIV",{class:!0});var yi=s(Y);g(Ft.$$.fragment,yi),Tu=l(yi),L=n(yi,"P",{});var B=s(L);yu=i(B,"This class can be used to create a list of "),fs=n(B,"A",{href:!0});var wT=s(fs);ku=i(wT,"LogitsProcessor"),wT.forEach(t),xu=i(B," or "),gs=n(B,"A",{href:!0});var LT=s(gs);wu=i(LT,"LogitsWarper"),LT.forEach(t),Lu=i(B,` to subsequently process a `),dl=n(B,"CODE",{});var ET=s(dl);Eu=i(ET,"scores"),ET.forEach(t),Pu=i(B," input tensor. This class inherits from list and adds a specific "),pl=n(B,"EM",{});var PT=s(pl);ml=n(PT,"STRONG",{});var FT=s(ml);Fu=i(FT,"call"),FT.forEach(t),PT.forEach(t),Du=i(B,` method to apply each `),us=n(B,"A",{href:!0});var DT=s(us);Su=i(DT,"LogitsProcessor"),DT.forEach(t),zu=i(B," or "),hs=n(B,"A",{href:!0});var ST=s(hs);Ou=i(ST,"LogitsWarper"),ST.forEach(t),qu=i(B," to the inputs."),B.forEach(t),Bu=l(yi),_s=n(yi,"DIV",{class:!0});var zT=s(_s);g(Dt.$$.fragment,zT),zT.forEach(t),yi.forEach(t),op=l(e),X=n(e,"DIV",{class:!0});var ki=s(X);g(St.$$.fragment,ki),Iu=l(ki),fl=n(ki,"P",{});var OT=s(fl);Au=i(OT,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),OT.forEach(t),Cu=l(ki),Pr=n(ki,"DIV",{class:!0});var km=s(Pr);g(zt.$$.fragment,km),Wu=l(km),gl=n(km,"P",{});var qT=s(gl);Nu=i(qT,"Torch method for warping logits."),qT.forEach(t),km.forEach(t),ki.forEach(t),np=l(e),J=n(e,"DIV",{class:!0});var xi=s(J);g(Ot.$$.fragment,xi),Vu=l(xi),bs=n(xi,"P",{});var O2=s(bs);vs=n(O2,"A",{href:!0});var BT=s(vs);Mu=i(BT,"LogitsProcessor"),BT.forEach(t),Gu=i(O2," enforcing a min-length by setting EOS probability to 0."),O2.forEach(t),ju=l(xi),$s=n(xi,"DIV",{class:!0});var IT=s($s);g(qt.$$.fragment,IT),IT.forEach(t),xi.forEach(t),sp=l(e),Q=n(e,"DIV",{class:!0});var wi=s(Q);g(Bt.$$.fragment,wi),Hu=l(wi),Ts=n(wi,"P",{});var q2=s(Ts);ys=n(q2,"A",{href:!0});var AT=s(ys);Ru=i(AT,"LogitsWarper"),AT.forEach(t),Ku=i(q2," for temperature (exponential scaling output probability distribution)."),q2.forEach(t),Uu=l(wi),ks=n(wi,"DIV",{class:!0});var CT=s(ks);g(It.$$.fragment,CT),CT.forEach(t),wi.forEach(t),ap=l(e),Z=n(e,"DIV",{class:!0});var Li=s(Z);g(At.$$.fragment,Li),Yu=l(Li),xs=n(Li,"P",{});var B2=s(xs);ws=n(B2,"A",{href:!0});var WT=s(ws);Xu=i(WT,"LogitsProcessor"),WT.forEach(t),Ju=i(B2," enforcing an exponential penalty on repeated 
sequences."),B2.forEach(t),Qu=l(Li),Ls=n(Li,"DIV",{class:!0});var NT=s(Ls);g(Ct.$$.fragment,NT),NT.forEach(t),Li.forEach(t),ip=l(e),ee=n(e,"DIV",{class:!0});var Ei=s(ee);g(Wt.$$.fragment,Ei),Zu=l(Ei),Es=n(Ei,"P",{});var I2=s(Es);Ps=n(I2,"A",{href:!0});var VT=s(Ps);eh=i(VT,"LogitsWarper"),VT.forEach(t),rh=i(I2," that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),I2.forEach(t),th=l(Ei),Fs=n(Ei,"DIV",{class:!0});var MT=s(Fs);g(Nt.$$.fragment,MT),MT.forEach(t),Ei.forEach(t),cp=l(e),re=n(e,"DIV",{class:!0});var Pi=s(re);g(Vt.$$.fragment,Pi),oh=l(Pi),Ds=n(Pi,"P",{});var A2=s(Ds);Ss=n(A2,"A",{href:!0});var GT=s(Ss);nh=i(GT,"LogitsWarper"),GT.forEach(t),sh=i(A2," that performs top-k, i.e. restricting to the k highest probability elements."),A2.forEach(t),ah=l(Pi),zs=n(Pi,"DIV",{class:!0});var jT=s(zs);g(Mt.$$.fragment,jT),jT.forEach(t),Pi.forEach(t),lp=l(e),te=n(e,"DIV",{class:!0});var Fi=s(te);g(Gt.$$.fragment,Fi),ih=l(Fi),Fr=n(Fi,"P",{});var hd=s(Fr);Os=n(hd,"A",{href:!0});var HT=s(Os);ch=i(HT,"LogitsWarper"),HT.forEach(t),lh=i(hd," that performs typical decoding. See "),jt=n(hd,"A",{href:!0,rel:!0});var RT=s(jt);dh=i(RT,`Typical Decoding for Natural Language Generation`),RT.forEach(t),ph=i(hd," for more information."),hd.forEach(t),mh=l(Fi),qs=n(Fi,"DIV",{class:!0});var KT=s(qs);g(Ht.$$.fragment,KT),KT.forEach(t),Fi.forEach(t),dp=l(e),oe=n(e,"DIV",{class:!0});var Di=s(oe);g(Rt.$$.fragment,Di),fh=l(Di),Dr=n(Di,"P",{});var _d=s(Dr);Bs=n(_d,"A",{href:!0});var UT=s(Bs);gh=i(UT,"LogitsProcessor"),UT.forEach(t),uh=i(_d,` that enforces no repetition of n-grams. See `),Kt=n(_d,"A",{href:!0,rel:!0});var YT=s(Kt);hh=i(YT,"Fairseq"),YT.forEach(t),_h=i(_d,"."),_d.forEach(t),bh=l(Di),Is=n(Di,"DIV",{class:!0});var XT=s(Is);g(Ut.$$.fragment,XT),XT.forEach(t),Di.forEach(t),pp=l(e),ne=n(e,"DIV",{class:!0});var Si=s(ne);g(Yt.$$.fragment,Si),vh=l(Si),As=n(Si,"P",{});var C2=s(As);Cs=n(C2,"A",{href:!0});var JT=s(Cs);$h=i(JT,"LogitsProcessor"),JT.forEach(t),Th=i(C2," that enforces that specified sequences will never be sampled."),C2.forEach(t),yh=l(Si),Ws=n(Si,"DIV",{class:!0});var QT=s(Ws);g(Xt.$$.fragment,QT),QT.forEach(t),Si.forEach(t),mp=l(e),se=n(e,"DIV",{class:!0});var zi=s(se);g(Jt.$$.fragment,zi),kh=l(zi),Sr=n(zi,"P",{});var bd=s(Sr);Ns=n(bd,"A",{href:!0});var ZT=s(Ns);xh=i(ZT,"LogitsProcessor"),ZT.forEach(t),wh=i(bd,` that enforces constrained generation and is useful for prefix-conditioned constrained generation. See `),Qt=n(bd,"A",{href:!0,rel:!0});var e4=s(Qt);Lh=i(e4,"Autoregressive Entity Retrieval"),e4.forEach(t),Eh=i(bd," for more information."),bd.forEach(t),Ph=l(zi),Vs=n(zi,"DIV",{class:!0});var r4=s(Vs);g(Zt.$$.fragment,r4),r4.forEach(t),zi.forEach(t),fp=l(e),ae=n(e,"DIV",{class:!0});var Oi=s(ae);g(eo.$$.fragment,Oi),Fh=l(Oi),Ne=n(Oi,"P",{});var Kn=s(Ne);Ms=n(Kn,"A",{href:!0});var t4=s(Ms);Dh=i(t4,"LogitsProcessor"),t4.forEach(t),Sh=i(Kn,` that enforces diverse beam search. Note that this logits processor is only effective for `),Gs=n(Kn,"A",{href:!0});var o4=s(Gs);zh=i(o4,"PreTrainedModel.group_beam_search()"),o4.forEach(t),Oh=i(Kn,". 
See "),ro=n(Kn,"A",{href:!0,rel:!0});var n4=s(ro);qh=i(n4,`Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models`),n4.forEach(t),Bh=i(Kn," for more details."),Kn.forEach(t),Ih=l(Oi),js=n(Oi,"DIV",{class:!0});var s4=s(js);g(to.$$.fragment,s4),s4.forEach(t),Oi.forEach(t),gp=l(e),ie=n(e,"DIV",{class:!0});var qi=s(ie);g(oo.$$.fragment,qi),Ah=l(qi),Hs=n(qi,"P",{});var W2=s(Hs);Rs=n(W2,"A",{href:!0});var a4=s(Rs);Ch=i(a4,"LogitsProcessor"),a4.forEach(t),Wh=i(W2," that enforces the specified token as the first generated token."),W2.forEach(t),Nh=l(qi),Ks=n(qi,"DIV",{class:!0});var i4=s(Ks);g(no.$$.fragment,i4),i4.forEach(t),qi.forEach(t),up=l(e),ce=n(e,"DIV",{class:!0});var Bi=s(ce);g(so.$$.fragment,Bi),Vh=l(Bi),zr=n(Bi,"P",{});var vd=s(zr);Us=n(vd,"A",{href:!0});var c4=s(Us);Mh=i(c4,"LogitsProcessor"),c4.forEach(t),Gh=i(vd," that enforces the specified token as the last generated token when "),ul=n(vd,"CODE",{});var l4=s(ul);jh=i(l4,"max_length"),l4.forEach(t),Hh=i(vd," is reached."),vd.forEach(t),Rh=l(Bi),Ys=n(Bi,"DIV",{class:!0});var d4=s(Ys);g(ao.$$.fragment,d4),d4.forEach(t),Bi.forEach(t),hp=l(e),le=n(e,"DIV",{class:!0});var Ii=s(le);g(io.$$.fragment,Ii),Kh=l(Ii),G=n(Ii,"P",{});var hr=s(G);Xs=n(hr,"A",{href:!0});var p4=s(Xs);Uh=i(p4,"LogitsProcessor"),p4.forEach(t),Yh=i(hr," that removes all "),hl=n(hr,"CODE",{});var m4=s(hl);Xh=i(m4,"nan"),m4.forEach(t),Jh=i(hr," and "),_l=n(hr,"CODE",{});var f4=s(_l);Qh=i(f4,"inf"),f4.forEach(t),Zh=i(hr,` values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. `),bl=n(hr,"CODE",{});var g4=s(bl);e_=i(g4,"max_length"),g4.forEach(t),r_=i(hr,` is reached.`),hr.forEach(t),t_=l(Ii),Js=n(Ii,"DIV",{class:!0});var u4=s(Js);g(co.$$.fragment,u4),u4.forEach(t),Ii.forEach(t),_p=l(e),de=n(e,"DIV",{class:!0});var Ai=s(de);g(lo.$$.fragment,Ai),o_=l(Ai),vl=n(Ai,"P",{});var h4=s(vl);n_=i(h4,"Abstract base class for all logit processors that can be applied during generation."),h4.forEach(t),s_=l(Ai),Or=n(Ai,"DIV",{class:!0});var xm=s(Or);g(po.$$.fragment,xm),a_=l(xm),$l=n(xm,"P",{});var _4=s($l);i_=i(_4,"TF method for processing logits."),_4.forEach(t),xm.forEach(t),Ai.forEach(t),bp=l(e),pe=n(e,"DIV",{class:!0});var Ci=s(pe);g(mo.$$.fragment,Ci),c_=l(Ci),A=n(Ci,"P",{});var He=s(A);l_=i(He,"This class can be used to create a list of "),Qs=n(He,"A",{href:!0});var b4=s(Qs);d_=i(b4,"TFLogitsProcessor"),b4.forEach(t),p_=i(He," to subsequently process a "),Tl=n(He,"CODE",{});var v4=s(Tl);m_=i(v4,"scores"),v4.forEach(t),f_=i(He,` input tensor. 
This class inherits from list and adds a specific `),yl=n(He,"EM",{});var $4=s(yl);kl=n($4,"STRONG",{});var T4=s(kl);g_=i(T4,"call"),T4.forEach(t),$4.forEach(t),u_=i(He," method to apply each "),Zs=n(He,"A",{href:!0});var y4=s(Zs);h_=i(y4,"TFLogitsProcessor"),y4.forEach(t),__=i(He,` to the inputs.`),He.forEach(t),b_=l(Ci),ea=n(Ci,"DIV",{class:!0});var k4=s(ea);g(fo.$$.fragment,k4),k4.forEach(t),Ci.forEach(t),vp=l(e),me=n(e,"DIV",{class:!0});var Wi=s(me);g(go.$$.fragment,Wi),v_=l(Wi),xl=n(Wi,"P",{});var x4=s(xl);$_=i(x4,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),x4.forEach(t),T_=l(Wi),qr=n(Wi,"DIV",{class:!0});var wm=s(qr);g(uo.$$.fragment,wm),y_=l(wm),wl=n(wm,"P",{});var w4=s(wl);k_=i(w4,"TF method for warping logits."),w4.forEach(t),wm.forEach(t),Wi.forEach(t),$p=l(e),fe=n(e,"DIV",{class:!0});var Ni=s(fe);g(ho.$$.fragment,Ni),x_=l(Ni),ra=n(Ni,"P",{});var N2=s(ra);ta=n(N2,"A",{href:!0});var L4=s(ta);w_=i(L4,"TFLogitsWarper"),L4.forEach(t),L_=i(N2," for temperature (exponential scaling output probability distribution)."),N2.forEach(t),E_=l(Ni),oa=n(Ni,"DIV",{class:!0});var E4=s(oa);g(_o.$$.fragment,E4),E4.forEach(t),Ni.forEach(t),Tp=l(e),ge=n(e,"DIV",{class:!0});var Vi=s(ge);g(bo.$$.fragment,Vi),P_=l(Vi),na=n(Vi,"P",{});var V2=s(na);sa=n(V2,"A",{href:!0});var P4=s(sa);F_=i(P4,"TFLogitsWarper"),P4.forEach(t),D_=i(V2," that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off."),V2.forEach(t),S_=l(Vi),aa=n(Vi,"DIV",{class:!0});var F4=s(aa);g(vo.$$.fragment,F4),F4.forEach(t),Vi.forEach(t),yp=l(e),ue=n(e,"DIV",{class:!0});var Mi=s(ue);g($o.$$.fragment,Mi),z_=l(Mi),ia=n(Mi,"P",{});var M2=s(ia);ca=n(M2,"A",{href:!0});var D4=s(ca);O_=i(D4,"TFLogitsWarper"),D4.forEach(t),q_=i(M2," that performs top-k, i.e. restricting to the k highest probability elements."),M2.forEach(t),B_=l(Mi),la=n(Mi,"DIV",{class:!0});var S4=s(la);g(To.$$.fragment,S4),S4.forEach(t),Mi.forEach(t),kp=l(e),he=n(e,"DIV",{class:!0});var Gi=s(he);g(yo.$$.fragment,Gi),I_=l(Gi),da=n(Gi,"P",{});var G2=s(da);pa=n(G2,"A",{href:!0});var z4=s(pa);A_=i(z4,"TFLogitsProcessor"),z4.forEach(t),C_=i(G2," enforcing a min-length by setting EOS probability to 0."),G2.forEach(t),W_=l(Gi),ma=n(Gi,"DIV",{class:!0});var O4=s(ma);g(ko.$$.fragment,O4),O4.forEach(t),Gi.forEach(t),xp=l(e),_e=n(e,"DIV",{class:!0});var ji=s(_e);g(xo.$$.fragment,ji),N_=l(ji),fa=n(ji,"P",{});var j2=s(fa);ga=n(j2,"A",{href:!0});var q4=s(ga);V_=i(q4,"TFLogitsProcessor"),q4.forEach(t),M_=i(j2," that enforces that specified sequences will never be sampled."),j2.forEach(t),G_=l(ji),ua=n(ji,"DIV",{class:!0});var B4=s(ua);g(wo.$$.fragment,B4),B4.forEach(t),ji.forEach(t),wp=l(e),be=n(e,"DIV",{class:!0});var Hi=s(be);g(Lo.$$.fragment,Hi),j_=l(Hi),Br=n(Hi,"P",{});var $d=s(Br);ha=n($d,"A",{href:!0});var I4=s(ha);H_=i(I4,"TFLogitsProcessor"),I4.forEach(t),R_=i($d,` that enforces no repetition of n-grams. 
See `),Eo=n($d,"A",{href:!0,rel:!0});var A4=s(Eo);K_=i(A4,"Fairseq"),A4.forEach(t),U_=i($d,"."),$d.forEach(t),Y_=l(Hi),_a=n(Hi,"DIV",{class:!0});var C4=s(_a);g(Po.$$.fragment,C4),C4.forEach(t),Hi.forEach(t),Lp=l(e),ve=n(e,"DIV",{class:!0});var Ri=s(ve);g(Fo.$$.fragment,Ri),X_=l(Ri),ba=n(Ri,"P",{});var H2=s(ba);va=n(H2,"A",{href:!0});var W4=s(va);J_=i(W4,"TFLogitsProcessor"),W4.forEach(t),Q_=i(H2," enforcing an exponential penalty on repeated sequences."),H2.forEach(t),Z_=l(Ri),$a=n(Ri,"DIV",{class:!0});var N4=s($a);g(Do.$$.fragment,N4),N4.forEach(t),Ri.forEach(t),Ep=l(e),$e=n(e,"DIV",{class:!0});var Ki=s($e);g(So.$$.fragment,Ki),eb=l(Ki),Ta=n(Ki,"P",{});var R2=s(Ta);ya=n(R2,"A",{href:!0});var V4=s(ya);rb=i(V4,"TFLogitsProcessor"),V4.forEach(t),tb=i(R2," that enforces the specified token as the first generated token."),R2.forEach(t),ob=l(Ki),ka=n(Ki,"DIV",{class:!0});var M4=s(ka);g(zo.$$.fragment,M4),M4.forEach(t),Ki.forEach(t),Pp=l(e),Te=n(e,"DIV",{class:!0});var Ui=s(Te);g(Oo.$$.fragment,Ui),nb=l(Ui),Ir=n(Ui,"P",{});var Td=s(Ir);xa=n(Td,"A",{href:!0});var G4=s(xa);sb=i(G4,"TFLogitsProcessor"),G4.forEach(t),ab=i(Td," that enforces the specified token as the last generated token when "),Ll=n(Td,"CODE",{});var j4=s(Ll);ib=i(j4,"max_length"),j4.forEach(t),cb=i(Td," is reached."),Td.forEach(t),lb=l(Ui),wa=n(Ui,"DIV",{class:!0});var H4=s(wa);g(qo.$$.fragment,H4),H4.forEach(t),Ui.forEach(t),Fp=l(e),ye=n(e,"DIV",{class:!0});var Yi=s(ye);g(Bo.$$.fragment,Yi),db=l(Yi),El=n(Yi,"P",{});var R4=s(El);pb=i(R4,"Abstract base class for all logit processors that can be applied during generation."),R4.forEach(t),mb=l(Yi),Ar=n(Yi,"DIV",{class:!0});var Lm=s(Ar);g(Io.$$.fragment,Lm),fb=l(Lm),Pl=n(Lm,"P",{});var K4=s(Pl);gb=i(K4,"Flax method for processing logits."),K4.forEach(t),Lm.forEach(t),Yi.forEach(t),Dp=l(e),ke=n(e,"DIV",{class:!0});var Xi=s(ke);g(Ao.$$.fragment,Xi),ub=l(Xi),E=n(Xi,"P",{});var I=s(E);hb=i(I,"This class can be used to create a list of "),La=n(I,"A",{href:!0});var U4=s(La);_b=i(U4,"FlaxLogitsProcessor"),U4.forEach(t),bb=i(I," or "),Ea=n(I,"A",{href:!0});var Y4=s(Ea);vb=i(Y4,"FlaxLogitsWarper"),Y4.forEach(t),$b=i(I,` to subsequently process a `),Fl=n(I,"CODE",{});var X4=s(Fl);Tb=i(X4,"scores"),X4.forEach(t),yb=i(I," input tensor. 
This class inherits from list and adds a specific "),Dl=n(I,"EM",{});var J4=s(Dl);Sl=n(J4,"STRONG",{});var Q4=s(Sl);kb=i(Q4,"call"),Q4.forEach(t),J4.forEach(t),xb=i(I,` method to apply each `),Pa=n(I,"A",{href:!0});var Z4=s(Pa);wb=i(Z4,"FlaxLogitsProcessor"),Z4.forEach(t),Lb=i(I," or "),Fa=n(I,"A",{href:!0});var ey=s(Fa);Eb=i(ey,"FlaxLogitsWarper"),ey.forEach(t),Pb=i(I," to the inputs."),I.forEach(t),Fb=l(Xi),Da=n(Xi,"DIV",{class:!0});var ry=s(Da);g(Co.$$.fragment,ry),ry.forEach(t),Xi.forEach(t),Sp=l(e),xe=n(e,"DIV",{class:!0});var Ji=s(xe);g(Wo.$$.fragment,Ji),Db=l(Ji),zl=n(Ji,"P",{});var ty=s(zl);Sb=i(ty,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),ty.forEach(t),zb=l(Ji),Cr=n(Ji,"DIV",{class:!0});var Em=s(Cr);g(No.$$.fragment,Em),Ob=l(Em),Ol=n(Em,"P",{});var oy=s(Ol);qb=i(oy,"Flax method for warping logits."),oy.forEach(t),Em.forEach(t),Ji.forEach(t),zp=l(e),we=n(e,"DIV",{class:!0});var Qi=s(we);g(Vo.$$.fragment,Qi),Bb=l(Qi),Sa=n(Qi,"P",{});var K2=s(Sa);za=n(K2,"A",{href:!0});var ny=s(za);Ib=i(ny,"FlaxLogitsWarper"),ny.forEach(t),Ab=i(K2," for temperature (exponential scaling output probability distribution)."),K2.forEach(t),Cb=l(Qi),Oa=n(Qi,"DIV",{class:!0});var sy=s(Oa);g(Mo.$$.fragment,sy),sy.forEach(t),Qi.forEach(t),Op=l(e),Le=n(e,"DIV",{class:!0});var Zi=s(Le);g(Go.$$.fragment,Zi),Wb=l(Zi),qa=n(Zi,"P",{});var U2=s(qa);Ba=n(U2,"A",{href:!0});var ay=s(Ba);Nb=i(ay,"FlaxLogitsWarper"),ay.forEach(t),Vb=i(U2," that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),U2.forEach(t),Mb=l(Zi),Ia=n(Zi,"DIV",{class:!0});var iy=s(Ia);g(jo.$$.fragment,iy),iy.forEach(t),Zi.forEach(t),qp=l(e),Ee=n(e,"DIV",{class:!0});var ec=s(Ee);g(Ho.$$.fragment,ec),Gb=l(ec),Aa=n(ec,"P",{});var Y2=s(Aa);Ca=n(Y2,"A",{href:!0});var cy=s(Ca);jb=i(cy,"FlaxLogitsWarper"),cy.forEach(t),Hb=i(Y2," that performs top-k, i.e. 
restricting to the k highest probability elements."),Y2.forEach(t),Rb=l(ec),Wa=n(ec,"DIV",{class:!0});var ly=s(Wa);g(Ro.$$.fragment,ly),ly.forEach(t),ec.forEach(t),Bp=l(e),Pe=n(e,"DIV",{class:!0});var rc=s(Pe);g(Ko.$$.fragment,rc),Kb=l(rc),Na=n(rc,"P",{});var X2=s(Na);Va=n(X2,"A",{href:!0});var dy=s(Va);Ub=i(dy,"FlaxLogitsProcessor"),dy.forEach(t),Yb=i(X2," that enforces the specified token as the first generated token."),X2.forEach(t),Xb=l(rc),Ma=n(rc,"DIV",{class:!0});var py=s(Ma);g(Uo.$$.fragment,py),py.forEach(t),rc.forEach(t),Ip=l(e),Fe=n(e,"DIV",{class:!0});var tc=s(Fe);g(Yo.$$.fragment,tc),Jb=l(tc),Wr=n(tc,"P",{});var yd=s(Wr);Ga=n(yd,"A",{href:!0});var my=s(Ga);Qb=i(my,"FlaxLogitsProcessor"),my.forEach(t),Zb=i(yd," that enforces the specified token as the last generated token when "),ql=n(yd,"CODE",{});var fy=s(ql);ev=i(fy,"max_length"),fy.forEach(t),rv=i(yd," is reached."),yd.forEach(t),tv=l(tc),ja=n(tc,"DIV",{class:!0});var gy=s(ja);g(Xo.$$.fragment,gy),gy.forEach(t),tc.forEach(t),Ap=l(e),De=n(e,"DIV",{class:!0});var oc=s(De);g(Jo.$$.fragment,oc),ov=l(oc),Ha=n(oc,"P",{});var J2=s(Ha);Ra=n(J2,"A",{href:!0});var uy=s(Ra);nv=i(uy,"FlaxLogitsProcessor"),uy.forEach(t),sv=i(J2," enforcing a min-length by setting EOS probability to 0."),J2.forEach(t),av=l(oc),Ka=n(oc,"DIV",{class:!0});var hy=s(Ka);g(Qo.$$.fragment,hy),hy.forEach(t),oc.forEach(t),Cp=l(e),ir=n(e,"H2",{class:!0});var Pm=s(ir);Nr=n(Pm,"A",{id:!0,class:!0,href:!0});var _y=s(Nr);Bl=n(_y,"SPAN",{});var by=s(Bl);g(Zo.$$.fragment,by),by.forEach(t),_y.forEach(t),iv=l(Pm),Il=n(Pm,"SPAN",{});var vy=s(Il);cv=i(vy,"StoppingCriteria"),vy.forEach(t),Pm.forEach(t),Wp=l(e),Vr=n(e,"P",{});var Fm=s(Vr);lv=i(Fm,"A "),Ua=n(Fm,"A",{href:!0});var $y=s(Ua);dv=i($y,"StoppingCriteria"),$y.forEach(t),pv=i(Fm," can be used to change when to stop generation (other than EOS token)."),Fm.forEach(t),Np=l(e),Se=n(e,"DIV",{class:!0});var nc=s(Se);g(en.$$.fragment,nc),mv=l(nc),Al=n(nc,"P",{});var Ty=s(Al);fv=i(Ty,"Abstract base class for all stopping criteria that can be applied during generation."),Ty.forEach(t),gv=l(nc),Ya=n(nc,"DIV",{class:!0});var yy=s(Ya);g(rn.$$.fragment,yy),yy.forEach(t),nc.forEach(t),Vp=l(e),cr=n(e,"DIV",{class:!0});var Dm=s(cr);g(tn.$$.fragment,Dm),uv=l(Dm),Xa=n(Dm,"DIV",{class:!0});var ky=s(Xa);g(on.$$.fragment,ky),ky.forEach(t),Dm.forEach(t),Mp=l(e),ze=n(e,"DIV",{class:!0});var sc=s(ze);g(nn.$$.fragment,sc),hv=l(sc),sn=n(sc,"P",{});var Sm=s(sn);_v=i(Sm,"This class can be used to stop generation whenever the full generated number of tokens exceeds "),Cl=n(Sm,"CODE",{});var xy=s(Cl);bv=i(xy,"max_length"),xy.forEach(t),vv=i(Sm,`. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.`),Sm.forEach(t),$v=l(sc),Ja=n(sc,"DIV",{class:!0});var wy=s(Ja);g(an.$$.fragment,wy),wy.forEach(t),sc.forEach(t),Gp=l(e),Oe=n(e,"DIV",{class:!0});var ac=s(Oe);g(cn.$$.fragment,ac),Tv=l(ac),ln=n(ac,"P",{});var zm=s(ln);yv=i(zm,`This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. 
You can override this by passing an `),Wl=n(zm,"CODE",{});var Ly=s(Wl);kv=i(Ly,"initial_time"),Ly.forEach(t),xv=i(zm,"."),zm.forEach(t),wv=l(ac),Qa=n(ac,"DIV",{class:!0});var Ey=s(Qa);g(dn.$$.fragment,Ey),Ey.forEach(t),ac.forEach(t),jp=l(e),lr=n(e,"H2",{class:!0});var Om=s(lr);Mr=n(Om,"A",{id:!0,class:!0,href:!0});var Py=s(Mr);Nl=n(Py,"SPAN",{});var Fy=s(Nl);g(pn.$$.fragment,Fy),Fy.forEach(t),Py.forEach(t),Lv=l(Om),Vl=n(Om,"SPAN",{});var Dy=s(Vl);Ev=i(Dy,"Constraints"),Dy.forEach(t),Om.forEach(t),Hp=l(e),Gr=n(e,"P",{});var qm=s(Gr);Pv=i(qm,"A "),Za=n(qm,"A",{href:!0});var Sy=s(Za);Fv=i(Sy,"Constraint"),Sy.forEach(t),Dv=i(qm," can be used to force the generation to include specific tokens or sequences in the output."),qm.forEach(t),Rp=l(e),$=n(e,"DIV",{class:!0});var T=s($);g(mn.$$.fragment,T),Sv=l(T),Ml=n(T,"P",{});var zy=s(Ml);zv=i(zy,`Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.`),zy.forEach(t),Ov=l(T),Gl=n(T,"P",{});var Oy=s(Gl);qv=i(Oy,"All classes that inherit Constraint must follow the requirement that"),Oy.forEach(t),Bv=l(T),g(jr.$$.fragment,T),Iv=l(T),jl=n(T,"P",{});var qy=s(jl);Av=i(qy,"will always terminate (halt)."),qy.forEach(t),Cv=l(T),Hr=n(T,"DIV",{class:!0});var Bm=s(Hr);g(fn.$$.fragment,Bm),Wv=l(Bm),Hl=n(Bm,"P",{});var By=s(Hl);Nv=i(By,"When called, returns the token that would take this constraint one step closer to being fulfilled."),By.forEach(t),Bm.forEach(t),Vv=l(T),Rr=n(T,"DIV",{class:!0});var Im=s(Rr);g(gn.$$.fragment,Im),Mv=l(Im),Rl=n(Im,"P",{});var Iy=s(Rl);Gv=i(Iy,"Creates a new instance of this constraint."),Iy.forEach(t),Im.forEach(t),jv=l(T),Kr=n(T,"DIV",{class:!0});var Am=s(Kr);g(un.$$.fragment,Am),Hv=l(Am),Kl=n(Am,"P",{});var Ay=s(Kl);Rv=i(Ay,"Reads in a token and returns whether it creates progress."),Ay.forEach(t),Am.forEach(t),Kv=l(T),Ur=n(T,"DIV",{class:!0});var Cm=s(Ur);g(hn.$$.fragment,Cm),Uv=l(Cm),_n=n(Cm,"P",{});var Wm=s(_n);Yv=i(Wm,"Returns the number of remaining steps of "),Ul=n(Wm,"CODE",{});var Cy=s(Ul);Xv=i(Cy,"advance()"),Cy.forEach(t),Jv=i(Wm," in order to complete this constraint."),Wm.forEach(t),Cm.forEach(t),Qv=l(T),Yr=n(T,"DIV",{class:!0});var Nm=s(Yr);g(bn.$$.fragment,Nm),Zv=l(Nm),Yl=n(Nm,"P",{});var Wy=s(Yl);e1=i(Wy,`Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token.`),Wy.forEach(t),Nm.forEach(t),r1=l(T),Xr=n(T,"DIV",{class:!0});var Vm=s(Xr);g(vn.$$.fragment,Vm),t1=l(Vm),Xl=n(Vm,"P",{});var Ny=s(Xl);o1=i(Ny,"Tests whether this constraint has been properly defined."),Ny.forEach(t),Vm.forEach(t),n1=l(T),Ve=n(T,"DIV",{class:!0});var ic=s(Ve);g($n.$$.fragment,ic),s1=l(ic),Tn=n(ic,"P",{});var Mm=s(Tn);a1=i(Mm,`Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `),Jl=n(Mm,"CODE",{});var Vy=s(Jl);i1=i(Vy,"does_advance(self, token_id: int)"),Vy.forEach(t),c1=i(Mm,"."),Mm.forEach(t),l1=l(ic),Ql=n(ic,"P",{});var My=s(Ql);d1=i(My,`This isn\u2019t to test whether a certain token will advance the progress; it\u2019s to update its state as if it has been generated. 
This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint)`),My.forEach(t),ic.forEach(t),T.forEach(t),Kp=l(e),dr=n(e,"DIV",{class:!0});var Gm=s(dr);g(yn.$$.fragment,Gm),p1=l(Gm),ei=n(Gm,"P",{});var Q2=s(ei);ri=n(Q2,"A",{href:!0});var Gy=s(ri);m1=i(Gy,"Constraint"),Gy.forEach(t),f1=i(Q2," enforcing that an ordered sequence of tokens is included in the output."),Q2.forEach(t),Gm.forEach(t),Up=l(e),pr=n(e,"DIV",{class:!0});var jm=s(pr);g(kn.$$.fragment,jm),g1=l(jm),xn=n(jm,"P",{});var Hm=s(xn);u1=i(Hm,"A special "),ti=n(Hm,"A",{href:!0});var jy=s(ti);h1=i(jy,"Constraint"),jy.forEach(t),_1=i(Hm," that is fulfilled by fulfilling just one of several constraints."),Hm.forEach(t),jm.forEach(t),Yp=l(e),C=n(e,"DIV",{class:!0});var tt=s(C);g(wn.$$.fragment,tt),b1=l(tt),Zl=n(tt,"P",{});var Hy=s(Zl);v1=i(Hy,"A class for beam scorers to track its progress through a list of constraints."),Hy.forEach(t),$1=l(tt),F=n(tt,"DIV",{class:!0});var j=s(F);g(Ln.$$.fragment,j),T1=l(j),ed=n(j,"P",{});var Ry=s(ed);y1=i(Ry,`The list of tokens to generate such that we can make progress. By \u201Clist\u201D we don\u2019t mean the list of token that will fully fulfill a constraint.`),Ry.forEach(t),k1=l(j),mr=n(j,"P",{});var cc=s(mr);x1=i(cc,"Given constraints "),rd=n(cc,"CODE",{});var Ky=s(rd);w1=i(Ky,"c_i = {t_ij | j == # of tokens}"),Ky.forEach(t),L1=i(cc,`, If we\u2019re not in the middle of progressing through a specific constraint `),td=n(cc,"CODE",{});var Uy=s(td);E1=i(Uy,"c_i"),Uy.forEach(t),P1=i(cc,", we return:"),cc.forEach(t),F1=l(j),od=n(j,"P",{});var Yy=s(od);nd=n(Yy,"CODE",{});var Xy=s(nd);D1=i(Xy,"[t_k1 for k in indices of unfulfilled constraints]"),Xy.forEach(t),Yy.forEach(t),S1=l(j),qe=n(j,"P",{});var ot=s(qe);z1=i(ot,`If we are in the middle of a constraint, then we return: `),sd=n(ot,"CODE",{});var Jy=s(sd);O1=i(Jy,"[t_ij]"),Jy.forEach(t),q1=i(ot,", where "),ad=n(ot,"CODE",{});var Qy=s(ad);B1=i(Qy,"i"),Qy.forEach(t),I1=i(ot," is the index of the inprogress constraint, "),id=n(ot,"CODE",{});var Zy=s(id);A1=i(Zy,"j"),Zy.forEach(t),C1=i(ot," is the next step for the constraint."),ot.forEach(t),W1=l(j),cd=n(j,"P",{});var e0=s(cd);N1=i(e0,`Though we don\u2019t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that\u2019s the only one we\u2019ll return.`),e0.forEach(t),j.forEach(t),V1=l(tt),Jr=n(tt,"DIV",{class:!0});var Rm=s(Jr);g(En.$$.fragment,Rm),M1=l(Rm),ld=n(Rm,"P",{});var r0=s(ld);G1=i(r0,"token_ids: the tokens generated thus far to reset the state of the progress through constraints."),r0.forEach(t),Rm.forEach(t),tt.forEach(t),Xp=l(e),fr=n(e,"H2",{class:!0});var Km=s(fr);Qr=n(Km,"A",{id:!0,class:!0,href:!0});var t0=s(Qr);dd=n(t0,"SPAN",{});var o0=s(dd);g(Pn.$$.fragment,o0),o0.forEach(t),t0.forEach(t),j1=l(Km),pd=n(Km,"SPAN",{});var n0=s(pd);H1=i(n0,"BeamSearch"),n0.forEach(t),Km.forEach(t),Jp=l(e),W=n(e,"DIV",{class:!0});var nt=s(W);g(Fn.$$.fragment,nt),R1=l(nt),gr=n(nt,"P",{});var lc=s(gr);K1=i(lc,"Abstract base class for all beam scorers that are used for "),oi=n(lc,"A",{href:!0});var s0=s(oi);U1=i(s0,"beam_search()"),s0.forEach(t),Y1=i(lc,` and `),ni=n(lc,"A",{href:!0});var a0=s(ni);X1=i(a0,"beam_sample()"),a0.forEach(t),J1=i(lc,"."),lc.forEach(t),Q1=l(nt),si=n(nt,"DIV",{class:!0});var i0=s(si);g(Dn.$$.fragment,i0),i0.forEach(t),Z1=l(nt),ai=n(nt,"DIV",{class:!0});var c0=s(ai);g(Sn.$$.fragment,c0),c0.forEach(t),nt.forEach(t),Qp=l(e),P=n(e,"DIV",{class:!0});var H=s(P);g(zn.$$.fragment,H),e2=l(H),ii=n(H,"P",{});var 
Z2=s(ii);ci=n(Z2,"A",{href:!0});var l0=s(ci);r2=i(l0,"BeamScorer"),l0.forEach(t),t2=i(Z2," implementing standard beam search decoding."),Z2.forEach(t),o2=l(H),On=n(H,"P",{});var Um=s(On);n2=i(Um,"Adapted in part from "),qn=n(Um,"A",{href:!0,rel:!0});var d0=s(qn);s2=i(d0,`Facebook\u2019s XLM beam search code`),d0.forEach(t),a2=i(Um,"."),Um.forEach(t),i2=l(H),li=n(H,"P",{});var e$=s(li);c2=i(e$,"Reference for the diverse beam search algorithm and implementation "),Bn=n(e$,"A",{href:!0,rel:!0});var p0=s(Bn);l2=i(p0,`Ashwin Kalyan\u2019s DBS implementation`),p0.forEach(t),e$.forEach(t),d2=l(H),di=n(H,"DIV",{class:!0});var m0=s(di);g(In.$$.fragment,m0),m0.forEach(t),p2=l(H),pi=n(H,"DIV",{class:!0});var f0=s(pi);g(An.$$.fragment,f0),f0.forEach(t),H.forEach(t),Zp=l(e),N=n(e,"DIV",{class:!0});var st=s(N);g(Cn.$$.fragment,st),m2=l(st),mi=n(st,"P",{});var r$=s(mi);fi=n(r$,"A",{href:!0});var g0=s(fi);f2=i(g0,"BeamScorer"),g0.forEach(t),g2=i(r$," implementing constrained beam search decoding."),r$.forEach(t),u2=l(st),gi=n(st,"DIV",{class:!0});var u0=s(gi);g(Wn.$$.fragment,u0),u0.forEach(t),h2=l(st),ui=n(st,"DIV",{class:!0});var h0=s(ui);g(Nn.$$.fragment,h0),h0.forEach(t),st.forEach(t),em=l(e),ur=n(e,"H2",{class:!0});var Ym=s(ur);Zr=n(Ym,"A",{id:!0,class:!0,href:!0});var _0=s(Zr);md=n(_0,"SPAN",{});var b0=s(md);g(Vn.$$.fragment,b0),b0.forEach(t),_0.forEach(t),_2=l(Ym),fd=n(Ym,"SPAN",{});var v0=s(fd);b2=i(v0,"Utilities"),v0.forEach(t),Ym.forEach(t),rm=l(e),Be=n(e,"DIV",{class:!0});var dc=s(Be);g(Mn.$$.fragment,dc),v2=l(dc),gd=n(dc,"P",{});var $0=s(gd);$2=i($0,"Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),$0.forEach(t),T2=l(dc),hi=n(dc,"P",{});var t$=s(hi);y2=i(t$,"From: "),Gn=n(t$,"A",{href:!0,rel:!0});var T0=s(Gn);k2=i(T0,"https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),T0.forEach(t),t$.forEach(t),dc.forEach(t),tm=l(e),Ie=n(e,"DIV",{class:!0});var pc=s(Ie);g(jn.$$.fragment,pc),x2=l(pc),ud=n(pc,"P",{});var y0=s(ud);w2=i(y0,"Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),y0.forEach(t),L2=l(pc),_i=n(pc,"P",{});var o$=s(_i);E2=i(o$,"From: "),Hn=n(o$,"A",{href:!0,rel:!0});var k0=s(Hn);P2=i(k0,"https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),k0.forEach(t),o$.forEach(t),pc.forEach(t),this.h()},h(){d(w,"name","hf:doc:metadata"),d(w,"content",JSON.stringify(O0)),d(Ce,"id","utilities-for-generation"),d(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ce,"href","#utilities-for-generation"),d(k,"class","relative 
group"),d(Un,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(Yn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),d(Xn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),d(Jn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),d(Qn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),d(Zn,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),d(es,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),d(_r,"id","generate-outputs"),d(_r,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_r,"href","#generate-outputs"),d(Ke,"class","relative group"),d(ts,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(os,"href","/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput"),d(ns,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(as,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(vr,"id","transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(vr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(vr,"href","#transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(Ue,"class","relative group"),d(Ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d($r,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Tr,"id","transformers.generation_utils.SampleDecoderOnlyOutput"),d(Tr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Tr,"href","#transformers.generation_utils.SampleDecoderOnlyOutput"),d(Je,"class","relative group"),d(Qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(yr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(kr,"id","transformers.generation_utils.BeamSearchDecoderOnlyOutput"),d(kr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(kr,"href","#transformers.generation_utils.BeamSearchDecoderOnlyOutput"),d(er,"class","relative group"),d(rr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(tr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(xr,"id","transformers.generation_utils.BeamSampleDecoderOnlyOutput"),d(xr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(xr,"href","#transformers.generation_utils.BeamSampleDecoderOnlyOutput"),d(or,"class","relative group"),d(nr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(sr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(wr,"id","transformers.LogitsProcessor"),d(wr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(wr,"href","#transformers.LogitsProcessor"),d(ar,"class","relative group"),d(ms,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Er,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(fs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(gs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(us,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(hs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(_s,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Pr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(vs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d($s,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ys,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(ks,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ws,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ls,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ps,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(Fs,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),d(Ss,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(zs,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Os,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper"),d(jt,"href","https://arxiv.org/abs/2202.00666"),d(jt,"rel","nofollow"),d(qs,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Bs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Kt,"href","https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345"),d(Kt,"rel","nofollow"),d(Is,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Cs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ws,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ns,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Qt,"href","https://arxiv.org/abs/2010.00904"),d(Qt,"rel","nofollow"),d(Vs,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ms,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Gs,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),d(ro,"href","https://arxiv.org/pdf/1610.02424.pdf"),d(ro,"rel","nofollow"),d(js,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Rs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ks,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Us,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ys,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Xs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor"),d(Js,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Or,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),d(Qs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(Zs,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(ea,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(qr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ta,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper"),d(oa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(sa,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper"),d(aa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ca,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper"),d(la,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(pa,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(ma,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ga,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(ua,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ha,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(Eo,"href","https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345"),d(Eo,"rel","nofollow"),d(_a,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(va,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d($a,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ya,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(ka,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(xa,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(wa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ar,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(La,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(Ea,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(Pa,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(Fa,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(Da,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Cr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(za,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(Oa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ba,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(Ia,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ca,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(Wa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Va,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(Ma,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ga,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(ja,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ra,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(Ka,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(De,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Nr,"id","transformers.StoppingCriteria"),d(Nr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Nr,"href","#transformers.StoppingCriteria"),d(ir,"class","relative group"),d(Ua,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria"),d(Ya,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Xa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(cr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),d(Ja,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Qa,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Mr,"id","transformers.Constraint"),d(Mr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Mr,"href","#transformers.Constraint"),d(lr,"class","relative group"),d(Za,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint"),d(Hr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Rr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Kr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ur,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Yr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Xr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ri,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint"),d(dr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ti,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint"),d(pr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Jr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Qr,"id","transformers.BeamScorer"),d(Qr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Qr,"href","#transformers.BeamScorer"),d(fr,"class","relative group"),d(oi,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),d(ni,"href","/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),d(si,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ai,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ci,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer"),d(qn,"href","https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529"),d(qn,"rel","nofollow"),d(Bn,"href","https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua"),d(Bn,"rel","nofollow"),d(di,"class","docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(pi,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(fi,"href","/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer"),d(gi,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(ui,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Zr,"id","transformers.top_k_top_p_filtering"),d(Zr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Zr,"href","#transformers.top_k_top_p_filtering"),d(ur,"class","relative group"),d(Gn,"href","https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),d(Gn,"rel","nofollow"),d(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),d(Hn,"href","https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),d(Hn,"rel","nofollow"),d(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,p){r(document.head,w),m(e,Re,p),m(e,k,p),r(k,Ce),r(Ce,mc),u(at,mc,null),r(k,Xm),r(k,fc),r(fc,Jm),m(e,xd,p),m(e,y,p),r(y,Qm),r(y,Un),r(Un,Zm),r(y,ef),r(y,Yn),r(Yn,rf),r(y,tf),r(y,Xn),r(Xn,of),r(y,nf),r(y,Jn),r(Jn,sf),r(y,af),r(y,Qn),r(Qn,cf),r(y,lf),r(y,Zn),r(Zn,df),r(y,pf),r(y,es),r(es,mf),r(y,ff),m(e,wd,p),m(e,rs,p),r(rs,gf),m(e,Ld,p),m(e,Ke,p),r(Ke,_r),r(_r,gc),u(it,gc,null),r(Ke,uf),r(Ke,uc),r(uc,hf),m(e,Ed,p),m(e,V,p),r(V,_f),r(V,ts),r(ts,bf),r(V,vf),r(V,os),r(os,$f),r(V,Tf),r(V,ns),r(ns,yf),r(V,kf),m(e,Pd,p),m(e,ss,p),r(ss,xf),m(e,Fd,p),u(ct,e,p),m(e,Dd,p),m(e,We,p),r(We,wf),r(We,hc),r(hc,Lf),r(We,Ef),r(We,as),r(as,Pf),r(We,Ff),m(e,Sd,p),m(e,M,p),r(M,is),r(is,_c),r(_c,Df),r(is,Sf),r(M,zf),r(M,cs),r(cs,bc),r(bc,Of),r(cs,qf),r(M,Bf),r(M,ls),r(ls,vc),r(vc,If),r(ls,Af),r(M,Cf),r(M,ds),r(ds,$c),r($c,Wf),r(ds,Nf),m(e,zd,p),m(e,x,p),r(x,Vf),r(x,Tc),r(Tc,Mf),r(x,Gf),r(x,yc),r(yc,jf),r(x,Hf),r(x,kc),r(kc,Rf),r(x,Kf),r(x,xc),r(xc,Uf),r(x,Yf),r(x,wc),r(wc,Xf),r(x,Jf),r(x,Lc),r(Lc,Qf),r(x,Zf),m(e,Od,p),m(e,S,p),r(S,eg),r(S,Ec),r(Ec,rg),r(S,tg),r(S,Pc),r(Pc,og),r(S,ng),r(S,Fc),r(Fc,sg),r(S,ag),r(S,Dc),r(Dc,ig),r(S,cg),m(e,qd,p),m(e,z,p),r(z,lg),r(z,Sc),r(Sc,dg),r(z,pg),r(z,zc),r(zc,mg),r(z,fg),r(z,Oc),r(Oc,gg),r(z,ug),r(z,qc),r(qc,hg),r(z,_g),m(e,Bd,p),u(lt,e,p),m(e,Id,p),m(e,br,p),r(br,bg),r(br,Bc),r(Bc,vg),r(br,$g),m(e,Ad,p),m(e,O,p),r(O,Tg),r(O,Ic),r(Ic,yg),r(O,kg),r(O,Ac),r(Ac,xg),r(O,wg),r(O,Cc),r(Cc,Lg),r(O,Eg),r(O,Wc),r(Wc,Pg),r(O,Fg),m(e,Cd,p),m(e,ps,p),r(ps,Dg),m(e,Wd,p),m(e,Ue,p),r(Ue,vr),r(vr,Nc),u(dt,Nc,null),r(Ue,Sg),r(Ue,Vc),r(Vc,zg),m(e,Nd,p),m(e,Ye,p),u(pt,Ye,null),r(Ye,Og),r(Ye,Mc),r(Mc,qg),m(e,Vd,p),m(e,Xe,p),u(mt,Xe,null),r(Xe,Bg),r(Xe,Gc),r(Gc,Ig),m(e,Md,p),m(e,R,p),u(ft,R,null),r(R,Ag),r(R,jc),r(jc,Cg),r(R,Wg),r(R,$r),u(gt,$r,null),r($r,Ng),r($r,Hc),r(Hc,Vg),m(e,Gd,p),m(e,Je,p),r(Je,Tr),r(Tr,Rc),u(ut,Rc,null),r(Je,Mg),r(Je,Kc),r(Kc,Gg),m(e,jd,p),m(e,Qe,p),u(ht,Qe,null),r(Qe,jg),r(Qe,Uc),r(Uc,Hg),m(e,Hd,p),m(e,Ze,p),u(_t,Ze,null),r(Ze,Rg),r(Ze,Yc),r(Yc,Kg),m(e,Rd,p),m(e,K,p),u(bt,K,null),r(K,Ug),r(K,Xc),r(Xc,Yg),r(K,Xg),r(K,yr),u(vt,yr,null),r(yr,Jg),r(yr,Jc),r(Jc,Qg),m(e,Kd,p),m(e,er,p),r(er,kr),r(kr,Qc),u($t,Qc,null),r(er,Zg),r(er,Z
c),r(Zc,eu),m(e,Ud,p),m(e,rr,p),u(Tt,rr,null),r(rr,ru),r(rr,el),r(el,tu),m(e,Yd,p),m(e,tr,p),u(yt,tr,null),r(tr,ou),r(tr,rl),r(rl,nu),m(e,Xd,p),m(e,or,p),r(or,xr),r(xr,tl),u(kt,tl,null),r(or,su),r(or,ol),r(ol,au),m(e,Jd,p),m(e,nr,p),u(xt,nr,null),r(nr,iu),r(nr,nl),r(nl,cu),m(e,Qd,p),m(e,sr,p),u(wt,sr,null),r(sr,lu),r(sr,sl),r(sl,du),m(e,Zd,p),m(e,ar,p),r(ar,wr),r(wr,al),u(Lt,al,null),r(ar,pu),r(ar,il),r(il,mu),m(e,ep,p),m(e,Lr,p),r(Lr,fu),r(Lr,ms),r(ms,gu),r(Lr,uu),m(e,rp,p),m(e,U,p),u(Et,U,null),r(U,hu),r(U,cl),r(cl,_u),r(U,bu),r(U,Er),u(Pt,Er,null),r(Er,vu),r(Er,ll),r(ll,$u),m(e,tp,p),m(e,Y,p),u(Ft,Y,null),r(Y,Tu),r(Y,L),r(L,yu),r(L,fs),r(fs,ku),r(L,xu),r(L,gs),r(gs,wu),r(L,Lu),r(L,dl),r(dl,Eu),r(L,Pu),r(L,pl),r(pl,ml),r(ml,Fu),r(L,Du),r(L,us),r(us,Su),r(L,zu),r(L,hs),r(hs,Ou),r(L,qu),r(Y,Bu),r(Y,_s),u(Dt,_s,null),m(e,op,p),m(e,X,p),u(St,X,null),r(X,Iu),r(X,fl),r(fl,Au),r(X,Cu),r(X,Pr),u(zt,Pr,null),r(Pr,Wu),r(Pr,gl),r(gl,Nu),m(e,np,p),m(e,J,p),u(Ot,J,null),r(J,Vu),r(J,bs),r(bs,vs),r(vs,Mu),r(bs,Gu),r(J,ju),r(J,$s),u(qt,$s,null),m(e,sp,p),m(e,Q,p),u(Bt,Q,null),r(Q,Hu),r(Q,Ts),r(Ts,ys),r(ys,Ru),r(Ts,Ku),r(Q,Uu),r(Q,ks),u(It,ks,null),m(e,ap,p),m(e,Z,p),u(At,Z,null),r(Z,Yu),r(Z,xs),r(xs,ws),r(ws,Xu),r(xs,Ju),r(Z,Qu),r(Z,Ls),u(Ct,Ls,null),m(e,ip,p),m(e,ee,p),u(Wt,ee,null),r(ee,Zu),r(ee,Es),r(Es,Ps),r(Ps,eh),r(Es,rh),r(ee,th),r(ee,Fs),u(Nt,Fs,null),m(e,cp,p),m(e,re,p),u(Vt,re,null),r(re,oh),r(re,Ds),r(Ds,Ss),r(Ss,nh),r(Ds,sh),r(re,ah),r(re,zs),u(Mt,zs,null),m(e,lp,p),m(e,te,p),u(Gt,te,null),r(te,ih),r(te,Fr),r(Fr,Os),r(Os,ch),r(Fr,lh),r(Fr,jt),r(jt,dh),r(Fr,ph),r(te,mh),r(te,qs),u(Ht,qs,null),m(e,dp,p),m(e,oe,p),u(Rt,oe,null),r(oe,fh),r(oe,Dr),r(Dr,Bs),r(Bs,gh),r(Dr,uh),r(Dr,Kt),r(Kt,hh),r(Dr,_h),r(oe,bh),r(oe,Is),u(Ut,Is,null),m(e,pp,p),m(e,ne,p),u(Yt,ne,null),r(ne,vh),r(ne,As),r(As,Cs),r(Cs,$h),r(As,Th),r(ne,yh),r(ne,Ws),u(Xt,Ws,null),m(e,mp,p),m(e,se,p),u(Jt,se,null),r(se,kh),r(se,Sr),r(Sr,Ns),r(Ns,xh),r(Sr,wh),r(Sr,Qt),r(Qt,Lh),r(Sr,Eh),r(se,Ph),r(se,Vs),u(Zt,Vs,null),m(e,fp,p),m(e,ae,p),u(eo,ae,null),r(ae,Fh),r(ae,Ne),r(Ne,Ms),r(Ms,Dh),r(Ne,Sh),r(Ne,Gs),r(Gs,zh),r(Ne,Oh),r(Ne,ro),r(ro,qh),r(Ne,Bh),r(ae,Ih),r(ae,js),u(to,js,null),m(e,gp,p),m(e,ie,p),u(oo,ie,null),r(ie,Ah),r(ie,Hs),r(Hs,Rs),r(Rs,Ch),r(Hs,Wh),r(ie,Nh),r(ie,Ks),u(no,Ks,null),m(e,up,p),m(e,ce,p),u(so,ce,null),r(ce,Vh),r(ce,zr),r(zr,Us),r(Us,Mh),r(zr,Gh),r(zr,ul),r(ul,jh),r(zr,Hh),r(ce,Rh),r(ce,Ys),u(ao,Ys,null),m(e,hp,p),m(e,le,p),u(io,le,null),r(le,Kh),r(le,G),r(G,Xs),r(Xs,Uh),r(G,Yh),r(G,hl),r(hl,Xh),r(G,Jh),r(G,_l),r(_l,Qh),r(G,Zh),r(G,bl),r(bl,e_),r(G,r_),r(le,t_),r(le,Js),u(co,Js,null),m(e,_p,p),m(e,de,p),u(lo,de,null),r(de,o_),r(de,vl),r(vl,n_),r(de,s_),r(de,Or),u(po,Or,null),r(Or,a_),r(Or,$l),r($l,i_),m(e,bp,p),m(e,pe,p),u(mo,pe,null),r(pe,c_),r(pe,A),r(A,l_),r(A,Qs),r(Qs,d_),r(A,p_),r(A,Tl),r(Tl,m_),r(A,f_),r(A,yl),r(yl,kl),r(kl,g_),r(A,u_),r(A,Zs),r(Zs,h_),r(A,__),r(pe,b_),r(pe,ea),u(fo,ea,null),m(e,vp,p),m(e,me,p),u(go,me,null),r(me,v_),r(me,xl),r(xl,$_),r(me,T_),r(me,qr),u(uo,qr,null),r(qr,y_),r(qr,wl),r(wl,k_),m(e,$p,p),m(e,fe,p),u(ho,fe,null),r(fe,x_),r(fe,ra),r(ra,ta),r(ta,w_),r(ra,L_),r(fe,E_),r(fe,oa),u(_o,oa,null),m(e,Tp,p),m(e,ge,p),u(bo,ge,null),r(ge,P_),r(ge,na),r(na,sa),r(sa,F_),r(na,D_),r(ge,S_),r(ge,aa),u(vo,aa,null),m(e,yp,p),m(e,ue,p),u($o,ue,null),r(ue,z_),r(ue,ia),r(ia,ca),r(ca,O_),r(ia,q_),r(ue,B_),r(ue,la),u(To,la,null),m(e,kp,p),m(e,he,p),u(yo,he,null),r(he,I_),r(he,da),r(da,pa),r(pa,A_),r(da,C_),r(he,W_),r(he,ma),u(ko,ma,null),m(e,xp,p),m(e,_e,p),u(xo,_e,null),r(_e,N_),r(_e,fa),r(fa,ga),r(ga,V_),
r(fa,M_),r(_e,G_),r(_e,ua),u(wo,ua,null),m(e,wp,p),m(e,be,p),u(Lo,be,null),r(be,j_),r(be,Br),r(Br,ha),r(ha,H_),r(Br,R_),r(Br,Eo),r(Eo,K_),r(Br,U_),r(be,Y_),r(be,_a),u(Po,_a,null),m(e,Lp,p),m(e,ve,p),u(Fo,ve,null),r(ve,X_),r(ve,ba),r(ba,va),r(va,J_),r(ba,Q_),r(ve,Z_),r(ve,$a),u(Do,$a,null),m(e,Ep,p),m(e,$e,p),u(So,$e,null),r($e,eb),r($e,Ta),r(Ta,ya),r(ya,rb),r(Ta,tb),r($e,ob),r($e,ka),u(zo,ka,null),m(e,Pp,p),m(e,Te,p),u(Oo,Te,null),r(Te,nb),r(Te,Ir),r(Ir,xa),r(xa,sb),r(Ir,ab),r(Ir,Ll),r(Ll,ib),r(Ir,cb),r(Te,lb),r(Te,wa),u(qo,wa,null),m(e,Fp,p),m(e,ye,p),u(Bo,ye,null),r(ye,db),r(ye,El),r(El,pb),r(ye,mb),r(ye,Ar),u(Io,Ar,null),r(Ar,fb),r(Ar,Pl),r(Pl,gb),m(e,Dp,p),m(e,ke,p),u(Ao,ke,null),r(ke,ub),r(ke,E),r(E,hb),r(E,La),r(La,_b),r(E,bb),r(E,Ea),r(Ea,vb),r(E,$b),r(E,Fl),r(Fl,Tb),r(E,yb),r(E,Dl),r(Dl,Sl),r(Sl,kb),r(E,xb),r(E,Pa),r(Pa,wb),r(E,Lb),r(E,Fa),r(Fa,Eb),r(E,Pb),r(ke,Fb),r(ke,Da),u(Co,Da,null),m(e,Sp,p),m(e,xe,p),u(Wo,xe,null),r(xe,Db),r(xe,zl),r(zl,Sb),r(xe,zb),r(xe,Cr),u(No,Cr,null),r(Cr,Ob),r(Cr,Ol),r(Ol,qb),m(e,zp,p),m(e,we,p),u(Vo,we,null),r(we,Bb),r(we,Sa),r(Sa,za),r(za,Ib),r(Sa,Ab),r(we,Cb),r(we,Oa),u(Mo,Oa,null),m(e,Op,p),m(e,Le,p),u(Go,Le,null),r(Le,Wb),r(Le,qa),r(qa,Ba),r(Ba,Nb),r(qa,Vb),r(Le,Mb),r(Le,Ia),u(jo,Ia,null),m(e,qp,p),m(e,Ee,p),u(Ho,Ee,null),r(Ee,Gb),r(Ee,Aa),r(Aa,Ca),r(Ca,jb),r(Aa,Hb),r(Ee,Rb),r(Ee,Wa),u(Ro,Wa,null),m(e,Bp,p),m(e,Pe,p),u(Ko,Pe,null),r(Pe,Kb),r(Pe,Na),r(Na,Va),r(Va,Ub),r(Na,Yb),r(Pe,Xb),r(Pe,Ma),u(Uo,Ma,null),m(e,Ip,p),m(e,Fe,p),u(Yo,Fe,null),r(Fe,Jb),r(Fe,Wr),r(Wr,Ga),r(Ga,Qb),r(Wr,Zb),r(Wr,ql),r(ql,ev),r(Wr,rv),r(Fe,tv),r(Fe,ja),u(Xo,ja,null),m(e,Ap,p),m(e,De,p),u(Jo,De,null),r(De,ov),r(De,Ha),r(Ha,Ra),r(Ra,nv),r(Ha,sv),r(De,av),r(De,Ka),u(Qo,Ka,null),m(e,Cp,p),m(e,ir,p),r(ir,Nr),r(Nr,Bl),u(Zo,Bl,null),r(ir,iv),r(ir,Il),r(Il,cv),m(e,Wp,p),m(e,Vr,p),r(Vr,lv),r(Vr,Ua),r(Ua,dv),r(Vr,pv),m(e,Np,p),m(e,Se,p),u(en,Se,null),r(Se,mv),r(Se,Al),r(Al,fv),r(Se,gv),r(Se,Ya),u(rn,Ya,null),m(e,Vp,p),m(e,cr,p),u(tn,cr,null),r(cr,uv),r(cr,Xa),u(on,Xa,null),m(e,Mp,p),m(e,ze,p),u(nn,ze,null),r(ze,hv),r(ze,sn),r(sn,_v),r(sn,Cl),r(Cl,bv),r(sn,vv),r(ze,$v),r(ze,Ja),u(an,Ja,null),m(e,Gp,p),m(e,Oe,p),u(cn,Oe,null),r(Oe,Tv),r(Oe,ln),r(ln,yv),r(ln,Wl),r(Wl,kv),r(ln,xv),r(Oe,wv),r(Oe,Qa),u(dn,Qa,null),m(e,jp,p),m(e,lr,p),r(lr,Mr),r(Mr,Nl),u(pn,Nl,null),r(lr,Lv),r(lr,Vl),r(Vl,Ev),m(e,Hp,p),m(e,Gr,p),r(Gr,Pv),r(Gr,Za),r(Za,Fv),r(Gr,Dv),m(e,Rp,p),m(e,$,p),u(mn,$,null),r($,Sv),r($,Ml),r(Ml,zv),r($,Ov),r($,Gl),r(Gl,qv),r($,Bv),u(jr,$,null),r($,Iv),r($,jl),r(jl,Av),r($,Cv),r($,Hr),u(fn,Hr,null),r(Hr,Wv),r(Hr,Hl),r(Hl,Nv),r($,Vv),r($,Rr),u(gn,Rr,null),r(Rr,Mv),r(Rr,Rl),r(Rl,Gv),r($,jv),r($,Kr),u(un,Kr,null),r(Kr,Hv),r(Kr,Kl),r(Kl,Rv),r($,Kv),r($,Ur),u(hn,Ur,null),r(Ur,Uv),r(Ur,_n),r(_n,Yv),r(_n,Ul),r(Ul,Xv),r(_n,Jv),r($,Qv),r($,Yr),u(bn,Yr,null),r(Yr,Zv),r(Yr,Yl),r(Yl,e1),r($,r1),r($,Xr),u(vn,Xr,null),r(Xr,t1),r(Xr,Xl),r(Xl,o1),r($,n1),r($,Ve),u($n,Ve,null),r(Ve,s1),r(Ve,Tn),r(Tn,a1),r(Tn,Jl),r(Jl,i1),r(Tn,c1),r(Ve,l1),r(Ve,Ql),r(Ql,d1),m(e,Kp,p),m(e,dr,p),u(yn,dr,null),r(dr,p1),r(dr,ei),r(ei,ri),r(ri,m1),r(ei,f1),m(e,Up,p),m(e,pr,p),u(kn,pr,null),r(pr,g1),r(pr,xn),r(xn,u1),r(xn,ti),r(ti,h1),r(xn,_1),m(e,Yp,p),m(e,C,p),u(wn,C,null),r(C,b1),r(C,Zl),r(Zl,v1),r(C,$1),r(C,F),u(Ln,F,null),r(F,T1),r(F,ed),r(ed,y1),r(F,k1),r(F,mr),r(mr,x1),r(mr,rd),r(rd,w1),r(mr,L1),r(mr,td),r(td,E1),r(mr,P1),r(F,F1),r(F,od),r(od,nd),r(nd,D1),r(F,S1),r(F,qe),r(qe,z1),r(qe,sd),r(sd,O1),r(qe,q1),r(qe,ad),r(ad,B1),r(qe,I1),r(qe,id),r(id,A1),r(qe,C1),r(F,W1),r(F,cd),r(cd,N1),r(C,V1),r(C,Jr),u(En,Jr,null),r(Jr,
M1),r(Jr,ld),r(ld,G1),m(e,Xp,p),m(e,fr,p),r(fr,Qr),r(Qr,dd),u(Pn,dd,null),r(fr,j1),r(fr,pd),r(pd,H1),m(e,Jp,p),m(e,W,p),u(Fn,W,null),r(W,R1),r(W,gr),r(gr,K1),r(gr,oi),r(oi,U1),r(gr,Y1),r(gr,ni),r(ni,X1),r(gr,J1),r(W,Q1),r(W,si),u(Dn,si,null),r(W,Z1),r(W,ai),u(Sn,ai,null),m(e,Qp,p),m(e,P,p),u(zn,P,null),r(P,e2),r(P,ii),r(ii,ci),r(ci,r2),r(ii,t2),r(P,o2),r(P,On),r(On,n2),r(On,qn),r(qn,s2),r(On,a2),r(P,i2),r(P,li),r(li,c2),r(li,Bn),r(Bn,l2),r(P,d2),r(P,di),u(In,di,null),r(P,p2),r(P,pi),u(An,pi,null),m(e,Zp,p),m(e,N,p),u(Cn,N,null),r(N,m2),r(N,mi),r(mi,fi),r(fi,f2),r(mi,g2),r(N,u2),r(N,gi),u(Wn,gi,null),r(N,h2),r(N,ui),u(Nn,ui,null),m(e,em,p),m(e,ur,p),r(ur,Zr),r(Zr,md),u(Vn,md,null),r(ur,_2),r(ur,fd),r(fd,b2),m(e,rm,p),m(e,Be,p),u(Mn,Be,null),r(Be,v2),r(Be,gd),r(gd,$2),r(Be,T2),r(Be,hi),r(hi,y2),r(hi,Gn),r(Gn,k2),m(e,tm,p),m(e,Ie,p),u(jn,Ie,null),r(Ie,x2),r(Ie,ud),r(ud,w2),r(Ie,L2),r(Ie,_i),r(_i,E2),r(_i,Hn),r(Hn,P2),om=!0},p(e,[p]){const Rn={};p&2&&(Rn.$$scope={dirty:p,ctx:e}),jr.$set(Rn)},i(e){om||(h(at.$$.fragment,e),h(it.$$.fragment,e),h(ct.$$.fragment,e),h(lt.$$.fragment,e),h(dt.$$.fragment,e),h(pt.$$.fragment,e),h(mt.$$.fragment,e),h(ft.$$.fragment,e),h(gt.$$.fragment,e),h(ut.$$.fragment,e),h(ht.$$.fragment,e),h(_t.$$.fragment,e),h(bt.$$.fragment,e),h(vt.$$.fragment,e),h($t.$$.fragment,e),h(Tt.$$.fragment,e),h(yt.$$.fragment,e),h(kt.$$.fragment,e),h(xt.$$.fragment,e),h(wt.$$.fragment,e),h(Lt.$$.fragment,e),h(Et.$$.fragment,e),h(Pt.$$.fragment,e),h(Ft.$$.fragment,e),h(Dt.$$.fragment,e),h(St.$$.fragment,e),h(zt.$$.fragment,e),h(Ot.$$.fragment,e),h(qt.$$.fragment,e),h(Bt.$$.fragment,e),h(It.$$.fragment,e),h(At.$$.fragment,e),h(Ct.$$.fragment,e),h(Wt.$$.fragment,e),h(Nt.$$.fragment,e),h(Vt.$$.fragment,e),h(Mt.$$.fragment,e),h(Gt.$$.fragment,e),h(Ht.$$.fragment,e),h(Rt.$$.fragment,e),h(Ut.$$.fragment,e),h(Yt.$$.fragment,e),h(Xt.$$.fragment,e),h(Jt.$$.fragment,e),h(Zt.$$.fragment,e),h(eo.$$.fragment,e),h(to.$$.fragment,e),h(oo.$$.fragment,e),h(no.$$.fragment,e),h(so.$$.fragment,e),h(ao.$$.fragment,e),h(io.$$.fragment,e),h(co.$$.fragment,e),h(lo.$$.fragment,e),h(po.$$.fragment,e),h(mo.$$.fragment,e),h(fo.$$.fragment,e),h(go.$$.fragment,e),h(uo.$$.fragment,e),h(ho.$$.fragment,e),h(_o.$$.fragment,e),h(bo.$$.fragment,e),h(vo.$$.fragment,e),h($o.$$.fragment,e),h(To.$$.fragment,e),h(yo.$$.fragment,e),h(ko.$$.fragment,e),h(xo.$$.fragment,e),h(wo.$$.fragment,e),h(Lo.$$.fragment,e),h(Po.$$.fragment,e),h(Fo.$$.fragment,e),h(Do.$$.fragment,e),h(So.$$.fragment,e),h(zo.$$.fragment,e),h(Oo.$$.fragment,e),h(qo.$$.fragment,e),h(Bo.$$.fragment,e),h(Io.$$.fragment,e),h(Ao.$$.fragment,e),h(Co.$$.fragment,e),h(Wo.$$.fragment,e),h(No.$$.fragment,e),h(Vo.$$.fragment,e),h(Mo.$$.fragment,e),h(Go.$$.fragment,e),h(jo.$$.fragment,e),h(Ho.$$.fragment,e),h(Ro.$$.fragment,e),h(Ko.$$.fragment,e),h(Uo.$$.fragment,e),h(Yo.$$.fragment,e),h(Xo.$$.fragment,e),h(Jo.$$.fragment,e),h(Qo.$$.fragment,e),h(Zo.$$.fragment,e),h(en.$$.fragment,e),h(rn.$$.fragment,e),h(tn.$$.fragment,e),h(on.$$.fragment,e),h(nn.$$.fragment,e),h(an.$$.fragment,e),h(cn.$$.fragment,e),h(dn.$$.fragment,e),h(pn.$$.fragment,e),h(mn.$$.fragment,e),h(jr.$$.fragment,e),h(fn.$$.fragment,e),h(gn.$$.fragment,e),h(un.$$.fragment,e),h(hn.$$.fragment,e),h(bn.$$.fragment,e),h(vn.$$.fragment,e),h($n.$$.fragment,e),h(yn.$$.fragment,e),h(kn.$$.fragment,e),h(wn.$$.fragment,e),h(Ln.$$.fragment,e),h(En.$$.fragment,e),h(Pn.$$.fragment,e),h(Fn.$$.fragment,e),h(Dn.$$.fragment,e),h(Sn.$$.fragment,e),h(zn.$$.fragment,e),h(In.$$.fragment,e),h(An.$$.fragment,e),h(Cn.$$.fragment,e),h(
Wn.$$.fragment,e),h(Nn.$$.fragment,e),h(Vn.$$.fragment,e),h(Mn.$$.fragment,e),h(jn.$$.fragment,e),om=!0)},o(e){_(at.$$.fragment,e),_(it.$$.fragment,e),_(ct.$$.fragment,e),_(lt.$$.fragment,e),_(dt.$$.fragment,e),_(pt.$$.fragment,e),_(mt.$$.fragment,e),_(ft.$$.fragment,e),_(gt.$$.fragment,e),_(ut.$$.fragment,e),_(ht.$$.fragment,e),_(_t.$$.fragment,e),_(bt.$$.fragment,e),_(vt.$$.fragment,e),_($t.$$.fragment,e),_(Tt.$$.fragment,e),_(yt.$$.fragment,e),_(kt.$$.fragment,e),_(xt.$$.fragment,e),_(wt.$$.fragment,e),_(Lt.$$.fragment,e),_(Et.$$.fragment,e),_(Pt.$$.fragment,e),_(Ft.$$.fragment,e),_(Dt.$$.fragment,e),_(St.$$.fragment,e),_(zt.$$.fragment,e),_(Ot.$$.fragment,e),_(qt.$$.fragment,e),_(Bt.$$.fragment,e),_(It.$$.fragment,e),_(At.$$.fragment,e),_(Ct.$$.fragment,e),_(Wt.$$.fragment,e),_(Nt.$$.fragment,e),_(Vt.$$.fragment,e),_(Mt.$$.fragment,e),_(Gt.$$.fragment,e),_(Ht.$$.fragment,e),_(Rt.$$.fragment,e),_(Ut.$$.fragment,e),_(Yt.$$.fragment,e),_(Xt.$$.fragment,e),_(Jt.$$.fragment,e),_(Zt.$$.fragment,e),_(eo.$$.fragment,e),_(to.$$.fragment,e),_(oo.$$.fragment,e),_(no.$$.fragment,e),_(so.$$.fragment,e),_(ao.$$.fragment,e),_(io.$$.fragment,e),_(co.$$.fragment,e),_(lo.$$.fragment,e),_(po.$$.fragment,e),_(mo.$$.fragment,e),_(fo.$$.fragment,e),_(go.$$.fragment,e),_(uo.$$.fragment,e),_(ho.$$.fragment,e),_(_o.$$.fragment,e),_(bo.$$.fragment,e),_(vo.$$.fragment,e),_($o.$$.fragment,e),_(To.$$.fragment,e),_(yo.$$.fragment,e),_(ko.$$.fragment,e),_(xo.$$.fragment,e),_(wo.$$.fragment,e),_(Lo.$$.fragment,e),_(Po.$$.fragment,e),_(Fo.$$.fragment,e),_(Do.$$.fragment,e),_(So.$$.fragment,e),_(zo.$$.fragment,e),_(Oo.$$.fragment,e),_(qo.$$.fragment,e),_(Bo.$$.fragment,e),_(Io.$$.fragment,e),_(Ao.$$.fragment,e),_(Co.$$.fragment,e),_(Wo.$$.fragment,e),_(No.$$.fragment,e),_(Vo.$$.fragment,e),_(Mo.$$.fragment,e),_(Go.$$.fragment,e),_(jo.$$.fragment,e),_(Ho.$$.fragment,e),_(Ro.$$.fragment,e),_(Ko.$$.fragment,e),_(Uo.$$.fragment,e),_(Yo.$$.fragment,e),_(Xo.$$.fragment,e),_(Jo.$$.fragment,e),_(Qo.$$.fragment,e),_(Zo.$$.fragment,e),_(en.$$.fragment,e),_(rn.$$.fragment,e),_(tn.$$.fragment,e),_(on.$$.fragment,e),_(nn.$$.fragment,e),_(an.$$.fragment,e),_(cn.$$.fragment,e),_(dn.$$.fragment,e),_(pn.$$.fragment,e),_(mn.$$.fragment,e),_(jr.$$.fragment,e),_(fn.$$.fragment,e),_(gn.$$.fragment,e),_(un.$$.fragment,e),_(hn.$$.fragment,e),_(bn.$$.fragment,e),_(vn.$$.fragment,e),_($n.$$.fragment,e),_(yn.$$.fragment,e),_(kn.$$.fragment,e),_(wn.$$.fragment,e),_(Ln.$$.fragment,e),_(En.$$.fragment,e),_(Pn.$$.fragment,e),_(Fn.$$.fragment,e),_(Dn.$$.fragment,e),_(Sn.$$.fragment,e),_(zn.$$.fragment,e),_(In.$$.fragment,e),_(An.$$.fragment,e),_(Cn.$$.fragment,e),_(Wn.$$.fragment,e),_(Nn.$$.fragment,e),_(Vn.$$.fragment,e),_(Mn.$$.fragment,e),_(jn.$$.fragment,e),om=!1},d(e){t(w),e&&t(Re),e&&t(k),b(at),e&&t(xd),e&&t(y),e&&t(wd),e&&t(rs),e&&t(Ld),e&&t(Ke),b(it),e&&t(Ed),e&&t(V),e&&t(Pd),e&&t(ss),e&&t(Fd),b(ct,e),e&&t(Dd),e&&t(We),e&&t(Sd),e&&t(M),e&&t(zd),e&&t(x),e&&t(Od),e&&t(S),e&&t(qd),e&&t(z),e&&t(Bd),b(lt,e),e&&t(Id),e&&t(br),e&&t(Ad),e&&t(O),e&&t(Cd),e&&t(ps),e&&t(Wd),e&&t(Ue),b(dt),e&&t(Nd),e&&t(Ye),b(pt),e&&t(Vd),e&&t(Xe),b(mt),e&&t(Md),e&&t(R),b(ft),b(gt),e&&t(Gd),e&&t(Je),b(ut),e&&t(jd),e&&t(Qe),b(ht),e&&t(Hd),e&&t(Ze),b(_t),e&&t(Rd),e&&t(K),b(bt),b(vt),e&&t(Kd),e&&t(er),b($t),e&&t(Ud),e&&t(rr),b(Tt),e&&t(Yd),e&&t(tr),b(yt),e&&t(Xd),e&&t(or),b(kt),e&&t(Jd),e&&t(nr),b(xt),e&&t(Qd),e&&t(sr),b(wt),e&&t(Zd),e&&t(ar),b(Lt),e&&t(ep),e&&t(Lr),e&&t(rp),e&&t(U),b(Et),b(Pt),e&&t(tp),e&&t(Y),b(Ft),b(Dt),e&&t(op),e&&t(X),b(St),b(zt),e&&t(np),e&&t(J),b(Ot)
,b(qt),e&&t(sp),e&&t(Q),b(Bt),b(It),e&&t(ap),e&&t(Z),b(At),b(Ct),e&&t(ip),e&&t(ee),b(Wt),b(Nt),e&&t(cp),e&&t(re),b(Vt),b(Mt),e&&t(lp),e&&t(te),b(Gt),b(Ht),e&&t(dp),e&&t(oe),b(Rt),b(Ut),e&&t(pp),e&&t(ne),b(Yt),b(Xt),e&&t(mp),e&&t(se),b(Jt),b(Zt),e&&t(fp),e&&t(ae),b(eo),b(to),e&&t(gp),e&&t(ie),b(oo),b(no),e&&t(up),e&&t(ce),b(so),b(ao),e&&t(hp),e&&t(le),b(io),b(co),e&&t(_p),e&&t(de),b(lo),b(po),e&&t(bp),e&&t(pe),b(mo),b(fo),e&&t(vp),e&&t(me),b(go),b(uo),e&&t($p),e&&t(fe),b(ho),b(_o),e&&t(Tp),e&&t(ge),b(bo),b(vo),e&&t(yp),e&&t(ue),b($o),b(To),e&&t(kp),e&&t(he),b(yo),b(ko),e&&t(xp),e&&t(_e),b(xo),b(wo),e&&t(wp),e&&t(be),b(Lo),b(Po),e&&t(Lp),e&&t(ve),b(Fo),b(Do),e&&t(Ep),e&&t($e),b(So),b(zo),e&&t(Pp),e&&t(Te),b(Oo),b(qo),e&&t(Fp),e&&t(ye),b(Bo),b(Io),e&&t(Dp),e&&t(ke),b(Ao),b(Co),e&&t(Sp),e&&t(xe),b(Wo),b(No),e&&t(zp),e&&t(we),b(Vo),b(Mo),e&&t(Op),e&&t(Le),b(Go),b(jo),e&&t(qp),e&&t(Ee),b(Ho),b(Ro),e&&t(Bp),e&&t(Pe),b(Ko),b(Uo),e&&t(Ip),e&&t(Fe),b(Yo),b(Xo),e&&t(Ap),e&&t(De),b(Jo),b(Qo),e&&t(Cp),e&&t(ir),b(Zo),e&&t(Wp),e&&t(Vr),e&&t(Np),e&&t(Se),b(en),b(rn),e&&t(Vp),e&&t(cr),b(tn),b(on),e&&t(Mp),e&&t(ze),b(nn),b(an),e&&t(Gp),e&&t(Oe),b(cn),b(dn),e&&t(jp),e&&t(lr),b(pn),e&&t(Hp),e&&t(Gr),e&&t(Rp),e&&t($),b(mn),b(jr),b(fn),b(gn),b(un),b(hn),b(bn),b(vn),b($n),e&&t(Kp),e&&t(dr),b(yn),e&&t(Up),e&&t(pr),b(kn),e&&t(Yp),e&&t(C),b(wn),b(Ln),b(En),e&&t(Xp),e&&t(fr),b(Pn),e&&t(Jp),e&&t(W),b(Fn),b(Dn),b(Sn),e&&t(Qp),e&&t(P),b(zn),b(In),b(An),e&&t(Zp),e&&t(N),b(Cn),b(Wn),b(Nn),e&&t(em),e&&t(ur),b(Vn),e&&t(rm),e&&t(Be),b(Mn),e&&t(tm),e&&t(Ie),b(jn)}}}const O0={local:"utilities-for-generation",sections:[{local:"generate-outputs",sections:[{local:"transformers.generation_utils.GreedySearchDecoderOnlyOutput",title:"GreedySearchOutput"},{local:"transformers.generation_utils.SampleDecoderOnlyOutput",title:"SampleOutput"},{local:"transformers.generation_utils.BeamSearchDecoderOnlyOutput",title:"BeamSearchOutput"},{local:"transformers.generation_utils.BeamSampleDecoderOnlyOutput",title:"BeamSampleOutput"}],title:"Generate Outputs"},{local:"transformers.LogitsProcessor",title:"LogitsProcessor"},{local:"transformers.StoppingCriteria",title:"StoppingCriteria"},{local:"transformers.Constraint",title:"Constraints"},{local:"transformers.BeamScorer",title:"BeamSearch"},{local:"transformers.top_k_top_p_filtering",title:"Utilities"}],title:"Utilities for Generation"};function q0(kd){return P0(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class N0 extends x0{constructor(w){super();w0(this,w,q0,z0,L0,{})}}export{N0 as default,O0 as metadata};
37
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/file_utils.mdx-hf-doc-builder.js
import{S as xt,i as kt,s as St,e as a,k as m,w as u,t as p,M as Dt,c as s,d as r,m as f,a as o,x as d,h as c,b as l,G as t,g as i,y as v,L as Tt,q as h,o as _,B as g,v as At}from"../../chunks/vendor-hf-doc-builder.js";import{D as b}from"../../chunks/Docstring-hf-doc-builder.js";import{I as Ae}from"../../chunks/IconCopyLink-hf-doc-builder.js";function Lt(Kr){let y,Le,E,I,pe,j,ir,ce,mr,Ie,z,fr,ue,pr,cr,ze,ie,ur,Me,w,M,de,C,dr,ve,vr,Ue,N,H,hr,he,_r,qe,P,F,gr,x,$r,_e,br,yr,U,Er,ge,wr,Nr,Pr,Ve,k,R,xr,S,kr,$e,Sr,Dr,q,Tr,be,Ar,Lr,Ir,Be,D,V,ye,J,zr,Ee,Mr,Oe,K,Q,Ge,W,X,je,Y,Z,Ce,ee,re,He,te,ae,Fe,T,B,we,se,Ur,Ne,qr,Re,$,oe,Vr,Pe,Br,Or,xe,Gr,jr,ke,Cr,Je,A,O,Se,ne,Hr,De,Fr,Ke,L,le,Rr,Te,Jr,Qe;return j=new Ae({}),C=new Ae({}),H=new b({props:{name:"class transformers.utils.ExplicitEnum",anchor:"transformers.utils.ExplicitEnum",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L244"}}),F=new b({props:{name:"class transformers.utils.PaddingStrategy",anchor:"transformers.utils.PaddingStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L256"}}),R=new b({props:{name:"class transformers.TensorType",anchor:"transformers.TensorType",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L267"}}),J=new Ae({}),Q=new b({props:{name:"transformers.add_start_docstrings",anchor:"transformers.add_start_docstrings",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L23"}}),X=new b({props:{name:"transformers.utils.add_start_docstrings_to_model_forward",anchor:"transformers.utils.add_start_docstrings_to_model_forward",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L31"}}),Z=new b({props:{name:"transformers.add_end_docstrings",anchor:"transformers.add_end_docstrings",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L53"}}),re=new b({props:{name:"transformers.utils.add_code_sample_docstrings",anchor:"transformers.utils.add_code_sample_docstrings",parameters:[{name:"*docstr",val:""},{name:"processor_class",val:" = None"},{name:"checkpoint",val:" = None"},{name:"output_type",val:" = None"},{name:"config_class",val:" = None"},{name:"mask",val:" = '[MASK]'"},{name:"qa_target_start_index",val:" = 14"},{name:"qa_target_end_index",val:" = 15"},{name:"model_cls",val:" = None"},{name:"modality",val:" = None"},{name:"expected_output",val:" = ''"},{name:"expected_loss",val:" = ''"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L1051"}}),ae=new b({props:{name:"transformers.utils.replace_return_docstrings",anchor:"transformers.utils.replace_return_docstrings",parameters:[{name:"output_type",val:" = None"},{name:"config_class",val:" = 
None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L1130"}}),se=new Ae({}),oe=new b({props:{name:"class transformers.utils.cached_property",anchor:"transformers.utils.cached_property",parameters:[{name:"fget",val:" = None"},{name:"fset",val:" = None"},{name:"fdel",val:" = None"},{name:"doc",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L32"}}),ne=new Ae({}),le=new b({props:{name:"class transformers._LazyModule",anchor:"transformers._LazyModule",parameters:[{name:"name",val:""},{name:"module_file",val:""},{name:"import_structure",val:""},{name:"module_spec",val:" = None"},{name:"extra_objects",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/import_utils.py#L1014"}}),{c(){y=a("meta"),Le=m(),E=a("h1"),I=a("a"),pe=a("span"),u(j.$$.fragment),ir=m(),ce=a("span"),mr=p("General Utilities"),Ie=m(),z=a("p"),fr=p("This page lists all of Transformers general utility functions that are found in the file "),ue=a("code"),pr=p("utils.py"),cr=p("."),ze=m(),ie=a("p"),ur=p("Most of those are only useful if you are studying the general code in the library."),Me=m(),w=a("h2"),M=a("a"),de=a("span"),u(C.$$.fragment),dr=m(),ve=a("span"),vr=p("Enums and namedtuples"),Ue=m(),N=a("div"),u(H.$$.fragment),hr=m(),he=a("p"),_r=p("Enum with more explicit error message for missing values."),qe=m(),P=a("div"),u(F.$$.fragment),gr=m(),x=a("p"),$r=p("Possible values for the "),_e=a("code"),br=p("padding"),yr=p(" argument in "),U=a("a"),Er=p("PreTrainedTokenizerBase."),ge=a("strong"),wr=p("call"),Nr=p("()"),Pr=p(`. Useful for tab-completion in an IDE.`),Ve=m(),k=a("div"),u(R.$$.fragment),xr=m(),S=a("p"),kr=p("Possible values for the "),$e=a("code"),Sr=p("return_tensors"),Dr=p(" argument in "),q=a("a"),Tr=p("PreTrainedTokenizerBase."),be=a("strong"),Ar=p("call"),Lr=p("()"),Ir=p(`. 
Useful for tab-completion in an IDE.`),Be=m(),D=a("h2"),V=a("a"),ye=a("span"),u(J.$$.fragment),zr=m(),Ee=a("span"),Mr=p("Special Decorators"),Oe=m(),K=a("div"),u(Q.$$.fragment),Ge=m(),W=a("div"),u(X.$$.fragment),je=m(),Y=a("div"),u(Z.$$.fragment),Ce=m(),ee=a("div"),u(re.$$.fragment),He=m(),te=a("div"),u(ae.$$.fragment),Fe=m(),T=a("h2"),B=a("a"),we=a("span"),u(se.$$.fragment),Ur=m(),Ne=a("span"),qr=p("Special Properties"),Re=m(),$=a("div"),u(oe.$$.fragment),Vr=m(),Pe=a("p"),Br=p("Descriptor that mimics @property but caches output in member variable."),Or=m(),xe=a("p"),Gr=p("From tensorflow_datasets"),jr=m(),ke=a("p"),Cr=p("Built-in in functools from Python 3.8."),Je=m(),A=a("h2"),O=a("a"),Se=a("span"),u(ne.$$.fragment),Hr=m(),De=a("span"),Fr=p("Other Utilities"),Ke=m(),L=a("div"),u(le.$$.fragment),Rr=m(),Te=a("p"),Jr=p("Module class that surfaces all objects but only performs associated imports when the objects are requested."),this.h()},l(e){const n=Dt('[data-svelte="svelte-1phssyn"]',document.head);y=s(n,"META",{name:!0,content:!0}),n.forEach(r),Le=f(e),E=s(e,"H1",{class:!0});var We=o(E);I=s(We,"A",{id:!0,class:!0,href:!0});var Qr=o(I);pe=s(Qr,"SPAN",{});var Wr=o(pe);d(j.$$.fragment,Wr),Wr.forEach(r),Qr.forEach(r),ir=f(We),ce=s(We,"SPAN",{});var Xr=o(ce);mr=c(Xr,"General Utilities"),Xr.forEach(r),We.forEach(r),Ie=f(e),z=s(e,"P",{});var Xe=o(z);fr=c(Xe,"This page lists all of Transformers general utility functions that are found in the file "),ue=s(Xe,"CODE",{});var Yr=o(ue);pr=c(Yr,"utils.py"),Yr.forEach(r),cr=c(Xe,"."),Xe.forEach(r),ze=f(e),ie=s(e,"P",{});var Zr=o(ie);ur=c(Zr,"Most of those are only useful if you are studying the general code in the library."),Zr.forEach(r),Me=f(e),w=s(e,"H2",{class:!0});var Ye=o(w);M=s(Ye,"A",{id:!0,class:!0,href:!0});var et=o(M);de=s(et,"SPAN",{});var rt=o(de);d(C.$$.fragment,rt),rt.forEach(r),et.forEach(r),dr=f(Ye),ve=s(Ye,"SPAN",{});var tt=o(ve);vr=c(tt,"Enums and namedtuples"),tt.forEach(r),Ye.forEach(r),Ue=f(e),N=s(e,"DIV",{class:!0});var Ze=o(N);d(H.$$.fragment,Ze),hr=f(Ze),he=s(Ze,"P",{});var at=o(he);_r=c(at,"Enum with more explicit error message for missing values."),at.forEach(r),Ze.forEach(r),qe=f(e),P=s(e,"DIV",{class:!0});var er=o(P);d(F.$$.fragment,er),gr=f(er),x=s(er,"P",{});var me=o(x);$r=c(me,"Possible values for the "),_e=s(me,"CODE",{});var st=o(_e);br=c(st,"padding"),st.forEach(r),yr=c(me," argument in "),U=s(me,"A",{href:!0});var rr=o(U);Er=c(rr,"PreTrainedTokenizerBase."),ge=s(rr,"STRONG",{});var ot=o(ge);wr=c(ot,"call"),ot.forEach(r),Nr=c(rr,"()"),rr.forEach(r),Pr=c(me,`. Useful for tab-completion in an IDE.`),me.forEach(r),er.forEach(r),Ve=f(e),k=s(e,"DIV",{class:!0});var tr=o(k);d(R.$$.fragment,tr),xr=f(tr),S=s(tr,"P",{});var fe=o(S);kr=c(fe,"Possible values for the "),$e=s(fe,"CODE",{});var nt=o($e);Sr=c(nt,"return_tensors"),nt.forEach(r),Dr=c(fe," argument in "),q=s(fe,"A",{href:!0});var ar=o(q);Tr=c(ar,"PreTrainedTokenizerBase."),be=s(ar,"STRONG",{});var lt=o(be);Ar=c(lt,"call"),lt.forEach(r),Lr=c(ar,"()"),ar.forEach(r),Ir=c(fe,`. 
Useful for tab-completion in an IDE.`),fe.forEach(r),tr.forEach(r),Be=f(e),D=s(e,"H2",{class:!0});var sr=o(D);V=s(sr,"A",{id:!0,class:!0,href:!0});var it=o(V);ye=s(it,"SPAN",{});var mt=o(ye);d(J.$$.fragment,mt),mt.forEach(r),it.forEach(r),zr=f(sr),Ee=s(sr,"SPAN",{});var ft=o(Ee);Mr=c(ft,"Special Decorators"),ft.forEach(r),sr.forEach(r),Oe=f(e),K=s(e,"DIV",{class:!0});var pt=o(K);d(Q.$$.fragment,pt),pt.forEach(r),Ge=f(e),W=s(e,"DIV",{class:!0});var ct=o(W);d(X.$$.fragment,ct),ct.forEach(r),je=f(e),Y=s(e,"DIV",{class:!0});var ut=o(Y);d(Z.$$.fragment,ut),ut.forEach(r),Ce=f(e),ee=s(e,"DIV",{class:!0});var dt=o(ee);d(re.$$.fragment,dt),dt.forEach(r),He=f(e),te=s(e,"DIV",{class:!0});var vt=o(te);d(ae.$$.fragment,vt),vt.forEach(r),Fe=f(e),T=s(e,"H2",{class:!0});var or=o(T);B=s(or,"A",{id:!0,class:!0,href:!0});var ht=o(B);we=s(ht,"SPAN",{});var _t=o(we);d(se.$$.fragment,_t),_t.forEach(r),ht.forEach(r),Ur=f(or),Ne=s(or,"SPAN",{});var gt=o(Ne);qr=c(gt,"Special Properties"),gt.forEach(r),or.forEach(r),Re=f(e),$=s(e,"DIV",{class:!0});var G=o($);d(oe.$$.fragment,G),Vr=f(G),Pe=s(G,"P",{});var $t=o(Pe);Br=c($t,"Descriptor that mimics @property but caches output in member variable."),$t.forEach(r),Or=f(G),xe=s(G,"P",{});var bt=o(xe);Gr=c(bt,"From tensorflow_datasets"),bt.forEach(r),jr=f(G),ke=s(G,"P",{});var yt=o(ke);Cr=c(yt,"Built-in in functools from Python 3.8."),yt.forEach(r),G.forEach(r),Je=f(e),A=s(e,"H2",{class:!0});var nr=o(A);O=s(nr,"A",{id:!0,class:!0,href:!0});var Et=o(O);Se=s(Et,"SPAN",{});var wt=o(Se);d(ne.$$.fragment,wt),wt.forEach(r),Et.forEach(r),Hr=f(nr),De=s(nr,"SPAN",{});var Nt=o(De);Fr=c(Nt,"Other Utilities"),Nt.forEach(r),nr.forEach(r),Ke=f(e),L=s(e,"DIV",{class:!0});var lr=o(L);d(le.$$.fragment,lr),Rr=f(lr),Te=s(lr,"P",{});var Pt=o(Te);Jr=c(Pt,"Module class that surfaces all objects but only performs associated imports when the objects are requested."),Pt.forEach(r),lr.forEach(r),this.h()},h(){l(y,"name","hf:doc:metadata"),l(y,"content",JSON.stringify(It)),l(I,"id","general-utilities"),l(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(I,"href","#general-utilities"),l(E,"class","relative group"),l(M,"id","transformers.utils.ExplicitEnum"),l(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(M,"href","#transformers.utils.ExplicitEnum"),l(w,"class","relative group"),l(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(U,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(q,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(V,"id","transformers.add_start_docstrings"),l(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(V,"href","#transformers.add_start_docstrings"),l(D,"class","relative group"),l(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(W,"class","docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(B,"id","transformers.utils.cached_property"),l(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(B,"href","#transformers.utils.cached_property"),l(T,"class","relative group"),l($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),l(O,"id","transformers._LazyModule"),l(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(O,"href","#transformers._LazyModule"),l(A,"class","relative group"),l(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,n){t(document.head,y),i(e,Le,n),i(e,E,n),t(E,I),t(I,pe),v(j,pe,null),t(E,ir),t(E,ce),t(ce,mr),i(e,Ie,n),i(e,z,n),t(z,fr),t(z,ue),t(ue,pr),t(z,cr),i(e,ze,n),i(e,ie,n),t(ie,ur),i(e,Me,n),i(e,w,n),t(w,M),t(M,de),v(C,de,null),t(w,dr),t(w,ve),t(ve,vr),i(e,Ue,n),i(e,N,n),v(H,N,null),t(N,hr),t(N,he),t(he,_r),i(e,qe,n),i(e,P,n),v(F,P,null),t(P,gr),t(P,x),t(x,$r),t(x,_e),t(_e,br),t(x,yr),t(x,U),t(U,Er),t(U,ge),t(ge,wr),t(U,Nr),t(x,Pr),i(e,Ve,n),i(e,k,n),v(R,k,null),t(k,xr),t(k,S),t(S,kr),t(S,$e),t($e,Sr),t(S,Dr),t(S,q),t(q,Tr),t(q,be),t(be,Ar),t(q,Lr),t(S,Ir),i(e,Be,n),i(e,D,n),t(D,V),t(V,ye),v(J,ye,null),t(D,zr),t(D,Ee),t(Ee,Mr),i(e,Oe,n),i(e,K,n),v(Q,K,null),i(e,Ge,n),i(e,W,n),v(X,W,null),i(e,je,n),i(e,Y,n),v(Z,Y,null),i(e,Ce,n),i(e,ee,n),v(re,ee,null),i(e,He,n),i(e,te,n),v(ae,te,null),i(e,Fe,n),i(e,T,n),t(T,B),t(B,we),v(se,we,null),t(T,Ur),t(T,Ne),t(Ne,qr),i(e,Re,n),i(e,$,n),v(oe,$,null),t($,Vr),t($,Pe),t(Pe,Br),t($,Or),t($,xe),t(xe,Gr),t($,jr),t($,ke),t(ke,Cr),i(e,Je,n),i(e,A,n),t(A,O),t(O,Se),v(ne,Se,null),t(A,Hr),t(A,De),t(De,Fr),i(e,Ke,n),i(e,L,n),v(le,L,null),t(L,Rr),t(L,Te),t(Te,Jr),Qe=!0},p:Tt,i(e){Qe||(h(j.$$.fragment,e),h(C.$$.fragment,e),h(H.$$.fragment,e),h(F.$$.fragment,e),h(R.$$.fragment,e),h(J.$$.fragment,e),h(Q.$$.fragment,e),h(X.$$.fragment,e),h(Z.$$.fragment,e),h(re.$$.fragment,e),h(ae.$$.fragment,e),h(se.$$.fragment,e),h(oe.$$.fragment,e),h(ne.$$.fragment,e),h(le.$$.fragment,e),Qe=!0)},o(e){_(j.$$.fragment,e),_(C.$$.fragment,e),_(H.$$.fragment,e),_(F.$$.fragment,e),_(R.$$.fragment,e),_(J.$$.fragment,e),_(Q.$$.fragment,e),_(X.$$.fragment,e),_(Z.$$.fragment,e),_(re.$$.fragment,e),_(ae.$$.fragment,e),_(se.$$.fragment,e),_(oe.$$.fragment,e),_(ne.$$.fragment,e),_(le.$$.fragment,e),Qe=!1},d(e){r(y),e&&r(Le),e&&r(E),g(j),e&&r(Ie),e&&r(z),e&&r(ze),e&&r(ie),e&&r(Me),e&&r(w),g(C),e&&r(Ue),e&&r(N),g(H),e&&r(qe),e&&r(P),g(F),e&&r(Ve),e&&r(k),g(R),e&&r(Be),e&&r(D),g(J),e&&r(Oe),e&&r(K),g(Q),e&&r(Ge),e&&r(W),g(X),e&&r(je),e&&r(Y),g(Z),e&&r(Ce),e&&r(ee),g(re),e&&r(He),e&&r(te),g(ae),e&&r(Fe),e&&r(T),g(se),e&&r(Re),e&&r($),g(oe),e&&r(Je),e&&r(A),g(ne),e&&r(Ke),e&&r(L),g(le)}}}const It={local:"general-utilities",sections:[{local:"transformers.utils.ExplicitEnum",title:"Enums and namedtuples"},{local:"transformers.add_start_docstrings",title:"Special Decorators"},{local:"transformers.utils.cached_property",title:"Special 
Properties"},{local:"transformers._LazyModule",title:"Other Utilities"}],title:"General Utilities"};function zt(Kr){return At(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Vt extends xt{constructor(y){super();kt(this,y,zt,Lt,St,{})}}export{Vt as default,It as metadata};
38
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/pipelines_utils.mdx-hf-doc-builder.js
import{S as Ra,i as Ba,s as Ma,e as a,k as l,w as h,t as s,M as za,c as o,d as r,m as p,a as n,x as u,h as i,b as m,G as e,g as c,y as g,L as Ga,q as v,o as _,B as b,v as Za}from"../../chunks/vendor-hf-doc-builder.js";import{D as P}from"../../chunks/Docstring-hf-doc-builder.js";import{I as Ht}from"../../chunks/IconCopyLink-hf-doc-builder.js";function Ka(ia){let x,ot,F,H,Le,G,Nt,Oe,Wt,nt,_e,Jt,st,be,qt,it,S,N,Ce,Z,jt,Ie,Ut,lt,A,K,Qt,X,Rt,$e,Bt,Mt,pt,k,Y,zt,Te,Gt,mt,w,ee,Zt,te,Kt,Ve,Xt,Yt,er,re,tr,He,rr,ar,dt,L,W,Ne,ae,or,We,nr,ct,f,oe,sr,Je,ir,lr,O,qe,pr,mr,je,dr,cr,Ue,fr,hr,J,Qe,ur,gr,Re,vr,_r,br,q,ne,$r,C,Pr,Pe,yr,wr,Be,Dr,Er,xr,j,se,Fr,ie,Sr,ye,Ar,kr,Lr,U,le,Or,Me,Cr,ft,D,pe,Ir,ze,Tr,Vr,Q,me,Hr,de,Nr,we,Wr,Jr,ht,E,ce,qr,Ge,jr,Ur,R,fe,Qr,Ze,Rr,ut,y,he,Br,Ke,Mr,zr,Xe,Gr,Zr,B,ue,Kr,Ye,Xr,gt,I,M,et,ge,Yr,tt,ea,vt,T,ve,ta,V,ra,De,aa,oa,rt,na,sa,_t;return G=new Ht({}),Z=new Ht({}),K=new P({props:{name:"class transformers.pipelines.ArgumentHandler",anchor:"transformers.pipelines.ArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L406"}}),Y=new P({props:{name:"class transformers.pipelines.ZeroShotClassificationArgumentHandler",anchor:"transformers.pipelines.ZeroShotClassificationArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L13"}}),ee=new P({props:{name:"class transformers.pipelines.QuestionAnsweringArgumentHandler",anchor:"transformers.pipelines.QuestionAnsweringArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L149"}}),ae=new Ht({}),oe=new P({props:{name:"class transformers.PipelineDataFormat",anchor:"transformers.PipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.PipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.PipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.PipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L416"}}),ne=new P({props:{name:"from_str",anchor:"transformers.PipelineDataFormat.from_str",parameters:[{name:"format",val:": str"},{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = False"}],parametersDescription:[{anchor:"transformers.PipelineDataFormat.from_str.output_path",description:`<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing 
data.`,name:"output_path"},{anchor:"transformers.PipelineDataFormat.from_str.input_path",description:`<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.`,name:"input_path"},{anchor:"transformers.PipelineDataFormat.from_str.column",description:`<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.`,name:"column"},{anchor:"transformers.PipelineDataFormat.from_str.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L493",returnDescription:` <p>The proper data format.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat" >PipelineDataFormat</a></p> `}}),se=new P({props:{name:"save",anchor:"transformers.PipelineDataFormat.save",parameters:[{name:"data",val:": typing.Union[dict, typing.List[dict]]"}],parametersDescription:[{anchor:"transformers.PipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.",name:"data"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L465"}}),le=new P({props:{name:"save_binary",anchor:"transformers.PipelineDataFormat.save_binary",parameters:[{name:"data",val:": typing.Union[dict, typing.List[dict]]"}],parametersDescription:[{anchor:"transformers.PipelineDataFormat.save_binary.data",description:"<strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.",name:"data"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L475",returnDescription:` <p>Path where the data has been saved.</p> `,returnType:` <p><code>str</code></p> `}}),pe=new P({props:{name:"class transformers.CsvPipelineDataFormat",anchor:"transformers.CsvPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = False"}],parametersDescription:[{anchor:"transformers.CsvPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.CsvPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.CsvPipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.CsvPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L529"}}),me=new P({props:{name:"save",anchor:"transformers.CsvPipelineDataFormat.save",parameters:[{name:"data",val:": typing.List[dict]"}],parametersDescription:[{anchor:"transformers.CsvPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>List[dict]</code>) &#x2014; 
The data to store.",name:"data"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L559"}}),ce=new P({props:{name:"class transformers.JsonPipelineDataFormat",anchor:"transformers.JsonPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = False"}],parametersDescription:[{anchor:"transformers.JsonPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.JsonPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.JsonPipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.JsonPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L573"}}),fe=new P({props:{name:"save",anchor:"transformers.JsonPipelineDataFormat.save",parameters:[{name:"data",val:": dict"}],parametersDescription:[{anchor:"transformers.JsonPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code>) &#x2014; The data to store.",name:"data"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L604"}}),he=new P({props:{name:"class transformers.PipedPipelineDataFormat",anchor:"transformers.PipedPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PipedPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.PipedPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.PipedPipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.PipedPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L615"}}),ue=new P({props:{name:"save",anchor:"transformers.PipedPipelineDataFormat.save",parameters:[{name:"data",val:": dict"}],parametersDescription:[{anchor:"transformers.PipedPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code>) &#x2014; The data to store.",name:"data"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L644"}}),ge=new Ht({}),ve=new P({props:{name:"class 
transformers.pipelines.PipelineException",anchor:"transformers.pipelines.PipelineException",parameters:[{name:"task",val:": str"},{name:"model",val:": str"},{name:"reason",val:": str"}],parametersDescription:[{anchor:"transformers.pipelines.PipelineException.task",description:"<strong>task</strong> (<code>str</code>) &#x2014; The task of the pipeline.",name:"task"},{anchor:"transformers.pipelines.PipelineException.model",description:"<strong>model</strong> (<code>str</code>) &#x2014; The model used by the pipeline.",name:"model"},{anchor:"transformers.pipelines.PipelineException.reason",description:"<strong>reason</strong> (<code>str</code>) &#x2014; The error message to display.",name:"reason"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L389"}}),{c(){x=a("meta"),ot=l(),F=a("h1"),H=a("a"),Le=a("span"),h(G.$$.fragment),Nt=l(),Oe=a("span"),Wt=s("Utilities for pipelines"),nt=l(),_e=a("p"),Jt=s("This page lists all the utility functions the library provides for pipelines."),st=l(),be=a("p"),qt=s("Most of those are only useful if you are studying the code of the models in the library."),it=l(),S=a("h2"),N=a("a"),Ce=a("span"),h(Z.$$.fragment),jt=l(),Ie=a("span"),Ut=s("Argument handling"),lt=l(),A=a("div"),h(K.$$.fragment),Qt=l(),X=a("p"),Rt=s("Base interface for handling arguments for each "),$e=a("a"),Bt=s("Pipeline"),Mt=s("."),pt=l(),k=a("div"),h(Y.$$.fragment),zt=l(),Te=a("p"),Gt=s(`Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair.`),mt=l(),w=a("div"),h(ee.$$.fragment),Zt=l(),te=a("p"),Kt=s(`QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal `),Ve=a("code"),Xt=s("SquadExample"),Yt=s("."),er=l(),re=a("p"),tr=s("QuestionAnsweringArgumentHandler manages all the possible to create a "),He=a("code"),rr=s("SquadExample"),ar=s(` from the command-line supplied arguments.`),dt=l(),L=a("h2"),W=a("a"),Ne=a("span"),h(ae.$$.fragment),or=l(),We=a("span"),nr=s("Data format"),ct=l(),f=a("div"),h(oe.$$.fragment),sr=l(),Je=a("p"),ir=s(`Base class for all the pipeline supported data format both for reading and writing. 
Supported data formats currently includes:`),lr=l(),O=a("ul"),qe=a("li"),pr=s("JSON"),mr=l(),je=a("li"),dr=s("CSV"),cr=l(),Ue=a("li"),fr=s("stdin/stdout (pipe)"),hr=l(),J=a("p"),Qe=a("code"),ur=s("PipelineDataFormat"),gr=s(` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `),Re=a("code"),vr=s("dataset_kwarg_1=dataset_column_1"),_r=s(" format."),br=l(),q=a("div"),h(ne.$$.fragment),$r=l(),C=a("p"),Pr=s("Creates an instance of the right subclass of "),Pe=a("a"),yr=s("PipelineDataFormat"),wr=s(" depending on "),Be=a("code"),Dr=s("format"),Er=s("."),xr=l(),j=a("div"),h(se.$$.fragment),Fr=l(),ie=a("p"),Sr=s("Save the provided data object with the representation for the current "),ye=a("a"),Ar=s("PipelineDataFormat"),kr=s("."),Lr=l(),U=a("div"),h(le.$$.fragment),Or=l(),Me=a("p"),Cr=s("Save the provided data object as a pickle-formatted binary data on the disk."),ft=l(),D=a("div"),h(pe.$$.fragment),Ir=l(),ze=a("p"),Tr=s("Support for pipelines using CSV data format."),Vr=l(),Q=a("div"),h(me.$$.fragment),Hr=l(),de=a("p"),Nr=s("Save the provided data object with the representation for the current "),we=a("a"),Wr=s("PipelineDataFormat"),Jr=s("."),ht=l(),E=a("div"),h(ce.$$.fragment),qr=l(),Ge=a("p"),jr=s("Support for pipelines using JSON file format."),Ur=l(),R=a("div"),h(fe.$$.fragment),Qr=l(),Ze=a("p"),Rr=s("Save the provided data object in a json file."),ut=l(),y=a("div"),h(he.$$.fragment),Br=l(),Ke=a("p"),Mr=s("Read data from piped input to the python process. For multi columns data, columns should separated by"),zr=l(),Xe=a("p"),Gr=s("If columns are provided, then the output will be a dictionary with {column_x: value_x}"),Zr=l(),B=a("div"),h(ue.$$.fragment),Kr=l(),Ye=a("p"),Xr=s("Print the data."),gt=l(),I=a("h2"),M=a("a"),et=a("span"),h(ge.$$.fragment),Yr=l(),tt=a("span"),ea=s("Utilities"),vt=l(),T=a("div"),h(ve.$$.fragment),ta=l(),V=a("p"),ra=s("Raised by a "),De=a("a"),aa=s("Pipeline"),oa=s(" when handling "),rt=a("strong"),na=s("call"),sa=s("."),this.h()},l(t){const d=za('[data-svelte="svelte-1phssyn"]',document.head);x=o(d,"META",{name:!0,content:!0}),d.forEach(r),ot=p(t),F=o(t,"H1",{class:!0});var bt=n(F);H=o(bt,"A",{id:!0,class:!0,href:!0});var la=n(H);Le=o(la,"SPAN",{});var pa=n(Le);u(G.$$.fragment,pa),pa.forEach(r),la.forEach(r),Nt=p(bt),Oe=o(bt,"SPAN",{});var ma=n(Oe);Wt=i(ma,"Utilities for pipelines"),ma.forEach(r),bt.forEach(r),nt=p(t),_e=o(t,"P",{});var da=n(_e);Jt=i(da,"This page lists all the utility functions the library provides for pipelines."),da.forEach(r),st=p(t),be=o(t,"P",{});var ca=n(be);qt=i(ca,"Most of those are only useful if you are studying the code of the models in the library."),ca.forEach(r),it=p(t),S=o(t,"H2",{class:!0});var $t=n(S);N=o($t,"A",{id:!0,class:!0,href:!0});var fa=n(N);Ce=o(fa,"SPAN",{});var ha=n(Ce);u(Z.$$.fragment,ha),ha.forEach(r),fa.forEach(r),jt=p($t),Ie=o($t,"SPAN",{});var ua=n(Ie);Ut=i(ua,"Argument handling"),ua.forEach(r),$t.forEach(r),lt=p(t),A=o(t,"DIV",{class:!0});var Pt=n(A);u(K.$$.fragment,Pt),Qt=p(Pt),X=o(Pt,"P",{});var yt=n(X);Rt=i(yt,"Base interface for handling arguments for each "),$e=o(yt,"A",{href:!0});var ga=n($e);Bt=i(ga,"Pipeline"),ga.forEach(r),Mt=i(yt,"."),yt.forEach(r),Pt.forEach(r),pt=p(t),k=o(t,"DIV",{class:!0});var wt=n(k);u(Y.$$.fragment,wt),zt=p(wt),Te=o(wt,"P",{});var va=n(Te);Gt=i(va,`Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis 
pair.`),va.forEach(r),wt.forEach(r),mt=p(t),w=o(t,"DIV",{class:!0});var Ee=n(w);u(ee.$$.fragment,Ee),Zt=p(Ee),te=o(Ee,"P",{});var Dt=n(te);Kt=i(Dt,`QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal `),Ve=o(Dt,"CODE",{});var _a=n(Ve);Xt=i(_a,"SquadExample"),_a.forEach(r),Yt=i(Dt,"."),Dt.forEach(r),er=p(Ee),re=o(Ee,"P",{});var Et=n(re);tr=i(Et,"QuestionAnsweringArgumentHandler manages all the possible to create a "),He=o(Et,"CODE",{});var ba=n(He);rr=i(ba,"SquadExample"),ba.forEach(r),ar=i(Et,` from the command-line supplied arguments.`),Et.forEach(r),Ee.forEach(r),dt=p(t),L=o(t,"H2",{class:!0});var xt=n(L);W=o(xt,"A",{id:!0,class:!0,href:!0});var $a=n(W);Ne=o($a,"SPAN",{});var Pa=n(Ne);u(ae.$$.fragment,Pa),Pa.forEach(r),$a.forEach(r),or=p(xt),We=o(xt,"SPAN",{});var ya=n(We);nr=i(ya,"Data format"),ya.forEach(r),xt.forEach(r),ct=p(t),f=o(t,"DIV",{class:!0});var $=n(f);u(oe.$$.fragment,$),sr=p($),Je=o($,"P",{});var wa=n(Je);ir=i(wa,`Base class for all the pipeline supported data format both for reading and writing. Supported data formats currently includes:`),wa.forEach(r),lr=p($),O=o($,"UL",{});var xe=n(O);qe=o(xe,"LI",{});var Da=n(qe);pr=i(Da,"JSON"),Da.forEach(r),mr=p(xe),je=o(xe,"LI",{});var Ea=n(je);dr=i(Ea,"CSV"),Ea.forEach(r),cr=p(xe),Ue=o(xe,"LI",{});var xa=n(Ue);fr=i(xa,"stdin/stdout (pipe)"),xa.forEach(r),xe.forEach(r),hr=p($),J=o($,"P",{});var at=n(J);Qe=o(at,"CODE",{});var Fa=n(Qe);ur=i(Fa,"PipelineDataFormat"),Fa.forEach(r),gr=i(at,` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `),Re=o(at,"CODE",{});var Sa=n(Re);vr=i(Sa,"dataset_kwarg_1=dataset_column_1"),Sa.forEach(r),_r=i(at," format."),at.forEach(r),br=p($),q=o($,"DIV",{class:!0});var Ft=n(q);u(ne.$$.fragment,Ft),$r=p(Ft),C=o(Ft,"P",{});var Fe=n(C);Pr=i(Fe,"Creates an instance of the right subclass of "),Pe=o(Fe,"A",{href:!0});var Aa=n(Pe);yr=i(Aa,"PipelineDataFormat"),Aa.forEach(r),wr=i(Fe," depending on "),Be=o(Fe,"CODE",{});var ka=n(Be);Dr=i(ka,"format"),ka.forEach(r),Er=i(Fe,"."),Fe.forEach(r),Ft.forEach(r),xr=p($),j=o($,"DIV",{class:!0});var St=n(j);u(se.$$.fragment,St),Fr=p(St),ie=o(St,"P",{});var At=n(ie);Sr=i(At,"Save the provided data object with the representation for the current "),ye=o(At,"A",{href:!0});var La=n(ye);Ar=i(La,"PipelineDataFormat"),La.forEach(r),kr=i(At,"."),At.forEach(r),St.forEach(r),Lr=p($),U=o($,"DIV",{class:!0});var kt=n(U);u(le.$$.fragment,kt),Or=p(kt),Me=o(kt,"P",{});var Oa=n(Me);Cr=i(Oa,"Save the provided data object as a pickle-formatted binary data on the disk."),Oa.forEach(r),kt.forEach(r),$.forEach(r),ft=p(t),D=o(t,"DIV",{class:!0});var Se=n(D);u(pe.$$.fragment,Se),Ir=p(Se),ze=o(Se,"P",{});var Ca=n(ze);Tr=i(Ca,"Support for pipelines using CSV data format."),Ca.forEach(r),Vr=p(Se),Q=o(Se,"DIV",{class:!0});var Lt=n(Q);u(me.$$.fragment,Lt),Hr=p(Lt),de=o(Lt,"P",{});var Ot=n(de);Nr=i(Ot,"Save the provided data object with the representation for the current "),we=o(Ot,"A",{href:!0});var Ia=n(we);Wr=i(Ia,"PipelineDataFormat"),Ia.forEach(r),Jr=i(Ot,"."),Ot.forEach(r),Lt.forEach(r),Se.forEach(r),ht=p(t),E=o(t,"DIV",{class:!0});var Ae=n(E);u(ce.$$.fragment,Ae),qr=p(Ae),Ge=o(Ae,"P",{});var Ta=n(Ge);jr=i(Ta,"Support for pipelines using JSON file format."),Ta.forEach(r),Ur=p(Ae),R=o(Ae,"DIV",{class:!0});var Ct=n(R);u(fe.$$.fragment,Ct),Qr=p(Ct),Ze=o(Ct,"P",{});var Va=n(Ze);Rr=i(Va,"Save the provided data object in a json 
file."),Va.forEach(r),Ct.forEach(r),Ae.forEach(r),ut=p(t),y=o(t,"DIV",{class:!0});var z=n(y);u(he.$$.fragment,z),Br=p(z),Ke=o(z,"P",{});var Ha=n(Ke);Mr=i(Ha,"Read data from piped input to the python process. For multi columns data, columns should separated by"),Ha.forEach(r),zr=p(z),Xe=o(z,"P",{});var Na=n(Xe);Gr=i(Na,"If columns are provided, then the output will be a dictionary with {column_x: value_x}"),Na.forEach(r),Zr=p(z),B=o(z,"DIV",{class:!0});var It=n(B);u(ue.$$.fragment,It),Kr=p(It),Ye=o(It,"P",{});var Wa=n(Ye);Xr=i(Wa,"Print the data."),Wa.forEach(r),It.forEach(r),z.forEach(r),gt=p(t),I=o(t,"H2",{class:!0});var Tt=n(I);M=o(Tt,"A",{id:!0,class:!0,href:!0});var Ja=n(M);et=o(Ja,"SPAN",{});var qa=n(et);u(ge.$$.fragment,qa),qa.forEach(r),Ja.forEach(r),Yr=p(Tt),tt=o(Tt,"SPAN",{});var ja=n(tt);ea=i(ja,"Utilities"),ja.forEach(r),Tt.forEach(r),vt=p(t),T=o(t,"DIV",{class:!0});var Vt=n(T);u(ve.$$.fragment,Vt),ta=p(Vt),V=o(Vt,"P",{});var ke=n(V);ra=i(ke,"Raised by a "),De=o(ke,"A",{href:!0});var Ua=n(De);aa=i(Ua,"Pipeline"),Ua.forEach(r),oa=i(ke," when handling "),rt=o(ke,"STRONG",{});var Qa=n(rt);na=i(Qa,"call"),Qa.forEach(r),sa=i(ke,"."),ke.forEach(r),Vt.forEach(r),this.h()},h(){m(x,"name","hf:doc:metadata"),m(x,"content",JSON.stringify(Xa)),m(H,"id","utilities-for-pipelines"),m(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(H,"href","#utilities-for-pipelines"),m(F,"class","relative group"),m(N,"id","transformers.pipelines.ArgumentHandler"),m(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(N,"href","#transformers.pipelines.ArgumentHandler"),m(S,"class","relative group"),m($e,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline"),m(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(W,"id","transformers.PipelineDataFormat"),m(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(W,"href","#transformers.PipelineDataFormat"),m(L,"class","relative group"),m(Pe,"href","/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ye,"href","/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(f,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(we,"href","/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),m(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(M,"id","transformers.pipelines.PipelineException"),m(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(M,"href","#transformers.pipelines.PipelineException"),m(I,"class","relative group"),m(De,"href","/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline"),m(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(t,d){e(document.head,x),c(t,ot,d),c(t,F,d),e(F,H),e(H,Le),g(G,Le,null),e(F,Nt),e(F,Oe),e(Oe,Wt),c(t,nt,d),c(t,_e,d),e(_e,Jt),c(t,st,d),c(t,be,d),e(be,qt),c(t,it,d),c(t,S,d),e(S,N),e(N,Ce),g(Z,Ce,null),e(S,jt),e(S,Ie),e(Ie,Ut),c(t,lt,d),c(t,A,d),g(K,A,null),e(A,Qt),e(A,X),e(X,Rt),e(X,$e),e($e,Bt),e(X,Mt),c(t,pt,d),c(t,k,d),g(Y,k,null),e(k,zt),e(k,Te),e(Te,Gt),c(t,mt,d),c(t,w,d),g(ee,w,null),e(w,Zt),e(w,te),e(te,Kt),e(te,Ve),e(Ve,Xt),e(te,Yt),e(w,er),e(w,re),e(re,tr),e(re,He),e(He,rr),e(re,ar),c(t,dt,d),c(t,L,d),e(L,W),e(W,Ne),g(ae,Ne,null),e(L,or),e(L,We),e(We,nr),c(t,ct,d),c(t,f,d),g(oe,f,null),e(f,sr),e(f,Je),e(Je,ir),e(f,lr),e(f,O),e(O,qe),e(qe,pr),e(O,mr),e(O,je),e(je,dr),e(O,cr),e(O,Ue),e(Ue,fr),e(f,hr),e(f,J),e(J,Qe),e(Qe,ur),e(J,gr),e(J,Re),e(Re,vr),e(J,_r),e(f,br),e(f,q),g(ne,q,null),e(q,$r),e(q,C),e(C,Pr),e(C,Pe),e(Pe,yr),e(C,wr),e(C,Be),e(Be,Dr),e(C,Er),e(f,xr),e(f,j),g(se,j,null),e(j,Fr),e(j,ie),e(ie,Sr),e(ie,ye),e(ye,Ar),e(ie,kr),e(f,Lr),e(f,U),g(le,U,null),e(U,Or),e(U,Me),e(Me,Cr),c(t,ft,d),c(t,D,d),g(pe,D,null),e(D,Ir),e(D,ze),e(ze,Tr),e(D,Vr),e(D,Q),g(me,Q,null),e(Q,Hr),e(Q,de),e(de,Nr),e(de,we),e(we,Wr),e(de,Jr),c(t,ht,d),c(t,E,d),g(ce,E,null),e(E,qr),e(E,Ge),e(Ge,jr),e(E,Ur),e(E,R),g(fe,R,null),e(R,Qr),e(R,Ze),e(Ze,Rr),c(t,ut,d),c(t,y,d),g(he,y,null),e(y,Br),e(y,Ke),e(Ke,Mr),e(y,zr),e(y,Xe),e(Xe,Gr),e(y,Zr),e(y,B),g(ue,B,null),e(B,Kr),e(B,Ye),e(Ye,Xr),c(t,gt,d),c(t,I,d),e(I,M),e(M,et),g(ge,et,null),e(I,Yr),e(I,tt),e(tt,ea),c(t,vt,d),c(t,T,d),g(ve,T,null),e(T,ta),e(T,V),e(V,ra),e(V,De),e(De,aa),e(V,oa),e(V,rt),e(rt,na),e(V,sa),_t=!0},p:Ga,i(t){_t||(v(G.$$.fragment,t),v(Z.$$.fragment,t),v(K.$$.fragment,t),v(Y.$$.fragment,t),v(ee.$$.fragment,t),v(ae.$$.fragment,t),v(oe.$$.fragment,t),v(ne.$$.fragment,t),v(se.$$.fragment,t),v(le.$$.fragment,t),v(pe.$$.fragment,t),v(me.$$.fragment,t),v(ce.$$.fragment,t),v(fe.$$.fragment,t),v(he.$$.fragment,t),v(ue.$$.fragment,t),v(ge.$$.fragment,t),v(ve.$$.fragment,t),_t=!0)},o(t){_(G.$$.fragment,t),_(Z.$$.fragment,t),_(K.$$.fragment,t),_(Y.$$.fragment,t),_(ee.$$.fragment,t),_(ae.$$.fragment,t),_(oe.$$.fragment,t),_(ne.$$.fragment,t),_(se.$$.fragment,t),_(le.$$.fragment,t),_(pe.$$.fragment,t),_(me.$$.fragment,t),_(ce.$$.fragment,t),_(fe.$$.fragment,t),_(he.$$.fragment,t),_(ue.$$.fragment,t),_(ge.$$.fragment,t),_(ve.$$.fragment,t),_t=!1},d(t){r(x),t&&r(ot),t&&r(F),b(G),t&&r(nt),t&&r(_e),t&&r(st),t&&r(be),t&&r(it),t&&r(S),b(Z),t&&r(lt),t&&r(A),b(K),t&&r(pt),t&&r(k),b(Y),t&&r(mt),t&&r(w),b(ee),t&&r(dt),t&&r(L),b(ae),t&&r(ct),t&&r(f),b(oe),b(ne),b(se),b(le),t&&r(ft),t&&r(D),b(pe),b(me),t&&r(ht),t&&r(E),b(ce),b(fe),t&&r(ut),t&&r(y),b(he),b(ue),t&&r(gt),t&&r(I),b(ge),t&&r(vt),t&&r(T),b(ve)}}}const 
Xa={local:"utilities-for-pipelines",sections:[{local:"transformers.pipelines.ArgumentHandler",title:"Argument handling"},{local:"transformers.PipelineDataFormat",title:"Data format"},{local:"transformers.pipelines.PipelineException",title:"Utilities"}],title:"Utilities for pipelines"};function Ya(ia){return Za(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ao extends Ra{constructor(x){super();Ba(this,x,Ya,Ka,Ma,{})}}export{ao as default,Xa as metadata};
39
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/trainer_utils.mdx-hf-doc-builder.js
import{S as ap,i as rp,s as sp,e as r,k as i,w as $,t as o,M as np,c as s,d as t,m as p,a as n,x as y,h as l,b as u,G as e,g as b,y as E,q as D,o as j,B as x,v as op,L as qa}from"../../chunks/vendor-hf-doc-builder.js";import{D as A}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Ba}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as vt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Ra}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function lp(U){let g,T,w,f,k;return f=new Ba({props:{code:"debug_overflow = DebugUnderflowOverflow(model)",highlighted:"debug_overflow = DebugUnderflowOverflow(model)"}}),{c(){g=r("p"),T=o("To activate the underflow/overflow detection, initialize the object with the model :"),w=i(),$(f.$$.fragment)},l(c){g=s(c,"P",{});var _=n(g);T=l(_,"To activate the underflow/overflow detection, initialize the object with the model :"),_.forEach(t),w=p(c),y(f.$$.fragment,c)},m(c,_){b(c,g,_),e(g,T),b(c,w,_),E(f,c,_),k=!0},p:qa,i(c){k||(D(f.$$.fragment,c),k=!0)},o(c){j(f.$$.fragment,c),k=!1},d(c){c&&t(g),c&&t(w),x(f,c)}}}function ip(U){let g,T,w,f,k;return f=new Ba({props:{code:`Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output`,highlighted:`<span class="hljs-attribute">Detected</span> inf/nan during batch_number=<span class="hljs-number">0</span> <span class="hljs-attribute">Last</span> <span class="hljs-number">21</span> forward frames: <span class="hljs-attribute">abs</span> min abs max metadata<span class="hljs-meta"> [...]</span> <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_0 Linear <span class="hljs-attribute">2</span>.<span class="hljs-number">17</span>e-<span class="hljs-number">07</span> <span class="hljs-number">4</span>.<span class="hljs-number">50</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">68</span>e-<span class="hljs-number">06</span> <span class="hljs-number">3</span>.<span class="hljs-number">70</span>e+<span class="hljs-number">01</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_1 Linear <span class="hljs-attribute">8</span>.<span class="hljs-number">08</span>e-<span class="hljs-number">07</span> <span class="hljs-number">2</span>.<span class="hljs-number">66</span>e+<span class="hljs-number">01</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span 
class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">27</span>e-<span class="hljs-number">04</span> <span class="hljs-number">2</span>.<span class="hljs-number">37</span>e+<span class="hljs-number">02</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wo Linear <span class="hljs-attribute">1</span>.<span class="hljs-number">01</span>e-<span class="hljs-number">06</span> <span class="hljs-number">6</span>.<span class="hljs-number">44</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span class="hljs-number">74</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense T5DenseGatedGeluDense <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.dropout Dropout <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> inf output`}}),{c(){g=r("p"),T=o("mixed precision :"),w=i(),$(f.$$.fragment)},l(c){g=s(c,"P",{});var _=n(g);T=l(_,"mixed precision :"),_.forEach(t),w=p(c),y(f.$$.fragment,c)},m(c,_){b(c,g,_),e(g,T),b(c,w,_),E(f,c,_),k=!0},p:qa,i(c){k||(D(f.$$.fragment,c),k=!0)},o(c){j(f.$$.fragment,c),k=!1},d(c){c&&t(g),c&&t(w),x(f,c)}}}function pp(U){let g,T,w,f,k;return f=new Ba({props:{code:"debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)",highlighted:'debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=<span class="hljs-number">100</span>)'}}),{c(){g=r("p"),T=o("By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :"),w=i(),$(f.$$.fragment)},l(c){g=s(c,"P",{});var _=n(g);T=l(_,"By default the last 21 frames are printed. You can change the default to adjust for your needs. 
For example :"),_.forEach(t),w=p(c),y(f.$$.fragment,c)},m(c,_){b(c,g,_),e(g,T),b(c,w,_),E(f,c,_),k=!0},p:qa,i(c){k||(D(f.$$.fragment,c),k=!0)},o(c){j(f.$$.fragment,c),k=!1},d(c){c&&t(g),c&&t(w),x(f,c)}}}function cp(U){let g,T,w,f,k;return f=new Ba({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])'}}),{c(){g=r("p"),T=o("given batch, and only do that for batches 1 and 3. Then you instantiate this class as :"),w=i(),$(f.$$.fragment)},l(c){g=s(c,"P",{});var _=n(g);T=l(_,"given batch, and only do that for batches 1 and 3. Then you instantiate this class as :"),_.forEach(t),w=p(c),y(f.$$.fragment,c)},m(c,_){b(c,g,_),e(g,T),b(c,w,_),E(f,c,_),k=!0},p:qa,i(c){k||(D(f.$$.fragment,c),k=!0)},o(c){j(f.$$.fragment,c),k=!1},d(c){c&&t(g),c&&t(w),x(f,c)}}}function dp(U){let g,T,w,f,k;return f=new Ba({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], abort_after_batch_num=<span class="hljs-number">3</span>)'}}),{c(){g=r("p"),T=o("You can also specify the batch number after which to stop the training, with :"),w=i(),$(f.$$.fragment)},l(c){g=s(c,"P",{});var _=n(g);T=l(_,"You can also specify the batch number after which to stop the training, with :"),_.forEach(t),w=p(c),y(f.$$.fragment,c)},m(c,_){b(c,g,_),e(g,T),b(c,w,_),E(f,c,_),k=!0},p:qa,i(c){k||(D(f.$$.fragment,c),k=!0)},o(c){j(f.$$.fragment,c),k=!1},d(c){c&&t(g),c&&t(w),x(f,c)}}}function hp(U){let g,T,w,f,k,c,_,wt,Nr,Ka,ee,Fr,tt,Gr,Vr,Ya,at,Mr,Wa,N,te,$t,be,Rr,yt,qr,Ja,F,_e,Br,Et,Kr,Qa,G,ve,Yr,Dt,Wr,Xa,I,we,Jr,jt,Qr,Xr,$e,rt,ye,Zr,es,ts,st,Ee,as,rs,Za,V,De,ss,L,ns,xt,os,ls,kt,is,ps,Pt,cs,ds,Tt,hs,fs,er,M,je,ms,Ot,us,tr,R,ae,At,xe,gs,Ct,bs,ar,q,ke,_s,Lt,vs,rr,B,re,Ut,Pe,ws,It,$s,sr,v,Te,ys,zt,Es,Ds,Ht,js,xs,St,Nt,ks,Ps,Ft,Ts,Os,K,nt,As,Gt,Cs,Ls,ot,Us,Vt,Is,zs,lt,Hs,Mt,Ss,Ns,Rt,Fs,Gs,Y,it,Vs,qt,Ms,Rs,pt,qs,Bt,Bs,Ks,ct,Ys,Kt,Ws,Js,Yt,Qs,Xs,Wt,Jt,Zs,en,Qt,tn,an,Xt,Zt,rn,sn,ea,nn,on,se,Oe,ln,Ae,pn,ta,cn,dn,hn,ne,Ce,fn,aa,mn,nr,W,oe,ra,Le,un,sa,gn,or,O,Ue,bn,Ie,_n,na,vn,wn,$n,ze,yn,oa,En,Dn,jn,H,He,xn,la,kn,Pn,Se,Tn,ia,On,An,Cn,le,Ne,Ln,Fe,Un,pa,In,zn,Hn,ie,Ge,Sn,Ve,Nn,ca,Fn,Gn,Vn,pe,Me,Mn,Re,Rn,da,qn,Bn,lr,J,ce,ha,qe,Kn,fa,Yn,ir,d,Be,Wn,Q,Jn,ma,Qn,Xn,ua,Zn,eo,to,ga,ao,ro,Ke,ba,so,no,_a,oo,lo,va,io,po,de,co,z,ho,wa,fo,mo,$a,uo,go,ya,bo,_o,vo,Ye,We,wo,Ea,$o,yo,Eo,Da,Do,jo,Je,xo,ja,ko,Po,To,he,Oo,X,Ao,xa,Co,Lo,ka,Uo,Io,zo,Pa,Ho,So,Qe,No,Ta,Fo,Go,Vo,fe,Mo,Oa,Ro,qo,Aa,Bo,Ko,Ca,Yo,Wo,Xe,Jo,La,Qo,Xo,Zo,me,el,Ua,tl,al,Ia,rl,sl,za,nl,ol,ue,ll,Ha,il,pl,dt,Sa,cl,dl,hl,Z,fl,Na,ml,ul,Fa,gl,bl,pr;return c=new vt({}),be=new vt({}),_e=new A({props:{name:"class transformers.EvalPrediction",anchor:"transformers.EvalPrediction",parameters:[{name:"predictions",val:": typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]"},{name:"label_ids",val:": typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]"},{name:"inputs",val:": typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray], NoneType] = None"}],parametersDescription:[{anchor:"transformers.EvalPrediction.predictions",description:"<strong>predictions</strong> (<code>np.ndarray</code>) &#x2014; Predictions of the model.",name:"predictions"},{anchor:"transformers.EvalPrediction.label_ids",description:"<strong>label_ids</strong> 
(<code>np.ndarray</code>) &#x2014; Targets to be matched.",name:"label_ids"},{anchor:"transformers.EvalPrediction.inputs",description:"<strong>inputs</strong> (<code>np.ndarray</code>, <em>optional</em>) &#x2014;",name:"inputs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L100"}}),ve=new A({props:{name:"class transformers.IntervalStrategy",anchor:"transformers.IntervalStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L174"}}),we=new A({props:{name:"transformers.enable_full_determinism",anchor:"transformers.enable_full_determinism",parameters:[{name:"seed",val:": int"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L58"}}),De=new A({props:{name:"transformers.set_seed",anchor:"transformers.set_seed",parameters:[{name:"seed",val:": int"}],parametersDescription:[{anchor:"transformers.set_seed.seed",description:"<strong>seed</strong> (<code>int</code>) &#x2014; The seed to set.",name:"seed"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L83"}}),je=new A({props:{name:"transformers.torch_distributed_zero_first",anchor:"transformers.torch_distributed_zero_first",parameters:[{name:"local_rank",val:": int"}],parametersDescription:[{anchor:"transformers.torch_distributed_zero_first.local_rank",description:"<strong>local_rank</strong> (<code>int</code>) &#x2014; The rank of the local process.",name:"local_rank"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L218"}}),xe=new vt({}),ke=new A({props:{name:"class transformers.trainer_callback.CallbackHandler",anchor:"transformers.trainer_callback.CallbackHandler",parameters:[{name:"callbacks",val:""},{name:"model",val:""},{name:"tokenizer",val:""},{name:"optimizer",val:""},{name:"lr_scheduler",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L290"}}),Pe=new vt({}),Te=new A({props:{name:"class transformers.trainer_pt_utils.DistributedTensorGatherer",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer",parameters:[{name:"world_size",val:""},{name:"num_samples",val:""},{name:"make_multiple_of",val:" = None"},{name:"padding_index",val:" = -100"}],parametersDescription:[{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.world_size",description:`<strong>world_size</strong> (<code>int</code>) &#x2014; The number of processes used in the distributed training.`,name:"world_size"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples",description:`<strong>num_samples</strong> (<code>int</code>) &#x2014; The number of samples in our dataset.`,name:"num_samples"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of",description:`<strong>make_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples).`,name:"make_multiple_of"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index",description:`<strong>padding_index</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The padding index to use if 
the arrays don&#x2019;t all have the same sequence length.`,name:"padding_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L344"}}),Oe=new A({props:{name:"add_arrays",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.add_arrays",parameters:[{name:"arrays",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L404"}}),Ce=new A({props:{name:"finalize",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.finalize",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L440"}}),Le=new vt({}),Ue=new A({props:{name:"class transformers.HfArgumentParser",anchor:"transformers.HfArgumentParser",parameters:[{name:"dataclass_types",val:": typing.Union[DataClassType, typing.Iterable[DataClassType]]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L46"}}),He=new A({props:{name:"parse_args_into_dataclasses",anchor:"transformers.HfArgumentParser.parse_args_into_dataclasses",parameters:[{name:"args",val:" = None"},{name:"return_remaining_strings",val:" = False"},{name:"look_for_args_file",val:" = True"},{name:"args_filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L180",returnDescription:` <ul> <li>the dataclass instances in the same order as they were passed to the initializer.abspath</li> <li>if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser after initialization.</li> <li>The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)</li> </ul> `,returnType:` <p>Tuple consisting of</p> `}}),Ne=new A({props:{name:"parse_dict",anchor:"transformers.HfArgumentParser.parse_dict",parameters:[{name:"args",val:": typing.Dict[str, typing.Any]"},{name:"allow_extra_keys",val:": bool = False"}],parametersDescription:[{anchor:"transformers.HfArgumentParser.parse_dict.args",description:`<strong>args</strong> (<code>dict</code>) &#x2014; dict containing config values`,name:"args"},{anchor:"transformers.HfArgumentParser.parse_dict.allow_extra_keys",description:`<strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.`,name:"allow_extra_keys"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L239",returnDescription:` <ul> <li>the dataclass instances in the same order as they were passed to the initializer.</li> </ul> `,returnType:` <p>Tuple consisting of</p> `}}),Ge=new A({props:{name:"parse_json_file",anchor:"transformers.HfArgumentParser.parse_json_file",parameters:[{name:"json_file",val:": str"},{name:"allow_extra_keys",val:": bool = False"}],parametersDescription:[{anchor:"transformers.HfArgumentParser.parse_json_file.json_file",description:`<strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; File name of the json file to parse`,name:"json_file"},{anchor:"transformers.HfArgumentParser.parse_json_file.allow_extra_keys",description:`<strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Defaults to False. 
If False, will raise an exception if the json file contains keys that are not parsed.`,name:"allow_extra_keys"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L267",returnDescription:` <ul> <li>the dataclass instances in the same order as they were passed to the initializer.</li> </ul> `,returnType:` <p>Tuple consisting of</p> `}}),Me=new A({props:{name:"parse_yaml_file",anchor:"transformers.HfArgumentParser.parse_yaml_file",parameters:[{name:"yaml_file",val:": str"},{name:"allow_extra_keys",val:": bool = False"}],parametersDescription:[{anchor:"transformers.HfArgumentParser.parse_yaml_file.yaml_file",description:`<strong>yaml_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; File name of the yaml file to parse`,name:"yaml_file"},{anchor:"transformers.HfArgumentParser.parse_yaml_file.allow_extra_keys",description:`<strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Defaults to False. If False, will raise an exception if the json file contains keys that are not parsed.`,name:"allow_extra_keys"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L289",returnDescription:` <ul> <li>the dataclass instances in the same order as they were passed to the initializer.</li> </ul> `,returnType:` <p>Tuple consisting of</p> `}}),qe=new vt({}),Be=new A({props:{name:"class transformers.debug_utils.DebugUnderflowOverflow",anchor:"transformers.debug_utils.DebugUnderflowOverflow",parameters:[{name:"model",val:""},{name:"max_frames_to_save",val:" = 21"},{name:"trace_batch_nums",val:" = []"},{name:"abort_after_batch_num",val:" = None"}],parametersDescription:[{anchor:"transformers.debug_utils.DebugUnderflowOverflow.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to debug.`,name:"model"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save",description:`<strong>max_frames_to_save</strong> (<code>int</code>, <em>optional</em>, defaults to 21) &#x2014; How many frames back to record`,name:"max_frames_to_save"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int],",description:`<strong>trace_batch_nums(<code>List[int]</code>,</strong> <em>optional</em>, defaults to <code>[]</code>) &#x2014; Which batch numbers to trace (turns detection off)`,name:"trace_batch_nums(List[int],"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num",description:"<strong>abort_after_batch_num</strong> (`int&#x201C;, <em>optional</em>) &#x2014;\nWhether to abort after a certain batch number has finished",name:"abort_after_batch_num"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/debug_utils.py#L27"}}),de=new Ra({props:{anchor:"transformers.debug_utils.DebugUnderflowOverflow.example",$$slots:{default:[lp]},$$scope:{ctx:U}}}),he=new Ra({props:{anchor:"transformers.debug_utils.DebugUnderflowOverflow.example-2",$$slots:{default:[ip]},$$scope:{ctx:U}}}),fe=new Ra({props:{anchor:"transformers.debug_utils.DebugUnderflowOverflow.example-3",$$slots:{default:[pp]},$$scope:{ctx:U}}}),me=new Ra({props:{anchor:"transformers.debug_utils.DebugUnderflowOverflow.example-4",$$slots:{default:[cp]},$$scope:{ctx:U}}}),ue=new 
Ra({props:{anchor:"transformers.debug_utils.DebugUnderflowOverflow.example-5",$$slots:{default:[dp]},$$scope:{ctx:U}}}),{c(){g=r("meta"),T=i(),w=r("h1"),f=r("a"),k=r("span"),$(c.$$.fragment),_=i(),wt=r("span"),Nr=o("Utilities for Trainer"),Ka=i(),ee=r("p"),Fr=o("This page lists all the utility functions used by "),tt=r("a"),Gr=o("Trainer"),Vr=o("."),Ya=i(),at=r("p"),Mr=o("Most of those are only useful if you are studying the code of the Trainer in the library."),Wa=i(),N=r("h2"),te=r("a"),$t=r("span"),$(be.$$.fragment),Rr=i(),yt=r("span"),qr=o("Utilities"),Ja=i(),F=r("div"),$(_e.$$.fragment),Br=i(),Et=r("p"),Kr=o("Evaluation output (always contains labels), to be used to compute metrics."),Qa=i(),G=r("div"),$(ve.$$.fragment),Yr=i(),Dt=r("p"),Wr=o("An enumeration."),Xa=i(),I=r("div"),$(we.$$.fragment),Jr=i(),jt=r("p"),Qr=o("Helper function for reproducible behavior during distributed training. See"),Xr=i(),$e=r("ul"),rt=r("li"),ye=r("a"),Zr=o("https://pytorch.org/docs/stable/notes/randomness.html"),es=o(" for pytorch"),ts=i(),st=r("li"),Ee=r("a"),as=o("https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism"),rs=o(" for tensorflow"),Za=i(),V=r("div"),$(De.$$.fragment),ss=i(),L=r("p"),ns=o("Helper function for reproducible behavior to set the seed in "),xt=r("code"),os=o("random"),ls=o(", "),kt=r("code"),is=o("numpy"),ps=o(", "),Pt=r("code"),cs=o("torch"),ds=o(" and/or "),Tt=r("code"),hs=o("tf"),fs=o(" (if installed)."),er=i(),M=r("div"),$(je.$$.fragment),ms=i(),Ot=r("p"),us=o("Decorator to make all processes in distributed training wait for each local_master to do something."),tr=i(),R=r("h2"),ae=r("a"),At=r("span"),$(xe.$$.fragment),gs=i(),Ct=r("span"),bs=o("Callbacks internals"),ar=i(),q=r("div"),$(ke.$$.fragment),_s=i(),Lt=r("p"),vs=o("Internal class that just calls the list of callbacks in order."),rr=i(),B=r("h2"),re=r("a"),Ut=r("span"),$(Pe.$$.fragment),ws=i(),It=r("span"),$s=o("Distributed Evaluation"),sr=i(),v=r("div"),$(Te.$$.fragment),ys=i(),zt=r("p"),Es=o("A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks."),Ds=i(),Ht=r("p"),js=o(`If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:`),xs=i(),St=r("p"),Nt=r("code"),ks=o("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]"),Ps=i(),Ft=r("p"),Ts=o(`to get something of size a multiple of 3 (so that each process gets the same dataset length). 
Then process 0, 1 and 2 will be responsible of making predictions for the following samples:`),Os=i(),K=r("ul"),nt=r("li"),As=o("P0: "),Gt=r("code"),Cs=o("[0, 1, 2, 3, 4, 5]"),Ls=i(),ot=r("li"),Us=o("P1: "),Vt=r("code"),Is=o("[6, 7, 8, 9, 10, 11]"),zs=i(),lt=r("li"),Hs=o("P2: "),Mt=r("code"),Ss=o("[12, 13, 14, 15, 0, 1]"),Ns=i(),Rt=r("p"),Fs=o("The first batch treated on each process will be"),Gs=i(),Y=r("ul"),it=r("li"),Vs=o("P0: "),qt=r("code"),Ms=o("[0, 1]"),Rs=i(),pt=r("li"),qs=o("P1: "),Bt=r("code"),Bs=o("[6, 7]"),Ks=i(),ct=r("li"),Ys=o("P2: "),Kt=r("code"),Ws=o("[12, 13]"),Js=i(),Yt=r("p"),Qs=o(`So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices:`),Xs=i(),Wt=r("p"),Jt=r("code"),Zs=o("[0, 1, 6, 7, 12, 13]"),en=i(),Qt=r("p"),tn=o(`If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop:`),an=i(),Xt=r("p"),Zt=r("code"),rn=o("[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]"),sn=i(),ea=r("p"),nn=o("For some reason, that\u2019s not going to roll their boat. This class is there to solve that problem."),on=i(),se=r("div"),$(Oe.$$.fragment),ln=i(),Ae=r("p"),pn=o("Add "),ta=r("code"),cn=o("arrays"),dn=o(` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we\u2019re bound to get an OOM, it happens at the beginning.`),hn=i(),ne=r("div"),$(Ce.$$.fragment),fn=i(),aa=r("p"),mn=o(`Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).`),nr=i(),W=r("h2"),oe=r("a"),ra=r("span"),$(Le.$$.fragment),un=i(),sa=r("span"),gn=o("Distributed Evaluation"),or=i(),O=r("div"),$(Ue.$$.fragment),bn=i(),Ie=r("p"),_n=o("This subclass of "),na=r("code"),vn=o("argparse.ArgumentParser"),wn=o(" uses type hints on dataclasses to generate arguments."),$n=i(),ze=r("p"),yn=o(`The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you\u2019ll get the output back after parsing as an additional namespace. Optional: To create sub argument groups use the `),oa=r("code"),En=o("_argument_group_name"),Dn=o(" attribute in the dataclass."),jn=i(),H=r("div"),$(He.$$.fragment),xn=i(),la=r("p"),kn=o("Parse command-line args into instances of the specified dataclass types."),Pn=i(),Se=r("p"),Tn=o("This relies on argparse\u2019s "),ia=r("code"),On=o("ArgumentParser.parse_known_args"),An=o(`. 
See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args`),Cn=i(),le=r("div"),$(Ne.$$.fragment),Ln=i(),Fe=r("p"),Un=o("Alternative helper method that does not use "),pa=r("code"),In=o("argparse"),zn=o(` at all, instead uses a dict and populating the dataclass types.`),Hn=i(),ie=r("div"),$(Ge.$$.fragment),Sn=i(),Ve=r("p"),Nn=o("Alternative helper method that does not use "),ca=r("code"),Fn=o("argparse"),Gn=o(` at all, instead loading a json file and populating the dataclass types.`),Vn=i(),pe=r("div"),$(Me.$$.fragment),Mn=i(),Re=r("p"),Rn=o("Alternative helper method that does not use "),da=r("code"),qn=o("argparse"),Bn=o(` at all, instead loading a json file and populating the dataclass types.`),lr=i(),J=r("h2"),ce=r("a"),ha=r("span"),$(qe.$$.fragment),Kn=i(),fa=r("span"),Yn=o("Debug Utilities"),ir=i(),d=r("div"),$(Be.$$.fragment),Wn=i(),Q=r("p"),Jn=o(`This debug class helps detect and understand where the model starts getting very large or very small, and more importantly `),ma=r("code"),Qn=o("nan"),Xn=o(" or "),ua=r("code"),Zn=o("inf"),eo=o(" weight and activation elements."),to=i(),ga=r("p"),ao=o("There are 2 working modes:"),ro=i(),Ke=r("ol"),ba=r("li"),so=o("Underflow/overflow detection (default)"),no=i(),_a=r("li"),oo=o("Specific batch absolute min/max tracing without detection"),lo=i(),va=r("p"),io=o("Mode 1: Underflow/overflow detection"),po=i(),$(de.$$.fragment),co=i(),z=r("p"),ho=o("then run the training as normal and if "),wa=r("code"),fo=o("nan"),mo=o(" or "),$a=r("code"),uo=o("inf"),go=o(` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `),ya=r("code"),bo=o("max_frames_to_save"),_o=o(` frames that lead to this event, each frame reporting`),vo=i(),Ye=r("ol"),We=r("li"),wo=o("the fully qualified module name plus the class name whose "),Ea=r("code"),$o=o("forward"),yo=o(" was run"),Eo=i(),Da=r("li"),Do=o("the absolute min and max value of all elements for each module weights, and the inputs and output"),jo=i(),Je=r("p"),xo=o("For example, here is the header and the last few frames in detection report for "),ja=r("code"),ko=o("google/mt5-small"),Po=o(" run in fp16"),To=i(),$(he.$$.fragment),Oo=i(),X=r("p"),Ao=o("You can see here, that "),xa=r("code"),Co=o("T5DenseGatedGeluDense.forward"),Lo=o(` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),ka=r("code"),Uo=o("Dropout"),Io=o(` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow.`),zo=i(),Pa=r("p"),Ho=o(`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),So=i(),Qe=r("p"),No=o("The tracking is done in a forward hook, which gets invoked immediately after "),Ta=r("code"),Fo=o("forward"),Go=o(" has completed."),Vo=i(),$(fe.$$.fragment),Mo=i(),Oa=r("p"),Ro=o(`To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section.`),qo=i(),Aa=r("p"),Bo=o("Mode 2. 
Specific batch absolute min/max tracing without detection"),Ko=i(),Ca=r("p"),Yo=o("The second work mode is per-batch tracing with the underflow/overflow detection feature turned off."),Wo=i(),Xe=r("p"),Jo=o("Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),La=r("code"),Qo=o("forward"),Xo=o(" call of a"),Zo=i(),$(me.$$.fragment),el=i(),Ua=r("p"),tl=o("And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed."),al=i(),Ia=r("p"),rl=o(`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.`),sl=i(),za=r("p"),nl=o("Early stopping:"),ol=i(),$(ue.$$.fragment),ll=i(),Ha=r("p"),il=o("This feature is mainly useful in the tracing mode, but you can use it for any mode."),pl=i(),dt=r("p"),Sa=r("strong"),cl=o("Performance"),dl=o(":"),hl=i(),Z=r("p"),fl=o("As this module measures absolute "),Na=r("code"),ml=o("min"),ul=o("/`"),Fa=r("code"),gl=o("max"),bl=o(` of each weight of the model on every forward it\u2019ll slow the training down. Therefore remember to turn it off once the debugging needs have been met.`),this.h()},l(a){const m=np('[data-svelte="svelte-1phssyn"]',document.head);g=s(m,"META",{name:!0,content:!0}),m.forEach(t),T=p(a),w=s(a,"H1",{class:!0});var Ze=n(w);f=s(Ze,"A",{id:!0,class:!0,href:!0});var Ga=n(f);k=s(Ga,"SPAN",{});var Va=n(k);y(c.$$.fragment,Va),Va.forEach(t),Ga.forEach(t),_=p(Ze),wt=s(Ze,"SPAN",{});var Ma=n(wt);Nr=l(Ma,"Utilities for Trainer"),Ma.forEach(t),Ze.forEach(t),Ka=p(a),ee=s(a,"P",{});var et=n(ee);Fr=l(et,"This page lists all the utility functions used by "),tt=s(et,"A",{href:!0});var kl=n(tt);Gr=l(kl,"Trainer"),kl.forEach(t),Vr=l(et,"."),et.forEach(t),Ya=p(a),at=s(a,"P",{});var Pl=n(at);Mr=l(Pl,"Most of those are only useful if you are studying the code of the Trainer in the library."),Pl.forEach(t),Wa=p(a),N=s(a,"H2",{class:!0});var cr=n(N);te=s(cr,"A",{id:!0,class:!0,href:!0});var Tl=n(te);$t=s(Tl,"SPAN",{});var Ol=n($t);y(be.$$.fragment,Ol),Ol.forEach(t),Tl.forEach(t),Rr=p(cr),yt=s(cr,"SPAN",{});var Al=n(yt);qr=l(Al,"Utilities"),Al.forEach(t),cr.forEach(t),Ja=p(a),F=s(a,"DIV",{class:!0});var dr=n(F);y(_e.$$.fragment,dr),Br=p(dr),Et=s(dr,"P",{});var Cl=n(Et);Kr=l(Cl,"Evaluation output (always contains labels), to be used to compute metrics."),Cl.forEach(t),dr.forEach(t),Qa=p(a),G=s(a,"DIV",{class:!0});var hr=n(G);y(ve.$$.fragment,hr),Yr=p(hr),Dt=s(hr,"P",{});var Ll=n(Dt);Wr=l(Ll,"An enumeration."),Ll.forEach(t),hr.forEach(t),Xa=p(a),I=s(a,"DIV",{class:!0});var ht=n(I);y(we.$$.fragment,ht),Jr=p(ht),jt=s(ht,"P",{});var Ul=n(jt);Qr=l(Ul,"Helper function for reproducible behavior during distributed training. 
See"),Ul.forEach(t),Xr=p(ht),$e=s(ht,"UL",{});var fr=n($e);rt=s(fr,"LI",{});var _l=n(rt);ye=s(_l,"A",{href:!0,rel:!0});var Il=n(ye);Zr=l(Il,"https://pytorch.org/docs/stable/notes/randomness.html"),Il.forEach(t),es=l(_l," for pytorch"),_l.forEach(t),ts=p(fr),st=s(fr,"LI",{});var vl=n(st);Ee=s(vl,"A",{href:!0,rel:!0});var zl=n(Ee);as=l(zl,"https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism"),zl.forEach(t),rs=l(vl," for tensorflow"),vl.forEach(t),fr.forEach(t),ht.forEach(t),Za=p(a),V=s(a,"DIV",{class:!0});var mr=n(V);y(De.$$.fragment,mr),ss=p(mr),L=s(mr,"P",{});var S=n(L);ns=l(S,"Helper function for reproducible behavior to set the seed in "),xt=s(S,"CODE",{});var Hl=n(xt);os=l(Hl,"random"),Hl.forEach(t),ls=l(S,", "),kt=s(S,"CODE",{});var Sl=n(kt);is=l(Sl,"numpy"),Sl.forEach(t),ps=l(S,", "),Pt=s(S,"CODE",{});var Nl=n(Pt);cs=l(Nl,"torch"),Nl.forEach(t),ds=l(S," and/or "),Tt=s(S,"CODE",{});var Fl=n(Tt);hs=l(Fl,"tf"),Fl.forEach(t),fs=l(S," (if installed)."),S.forEach(t),mr.forEach(t),er=p(a),M=s(a,"DIV",{class:!0});var ur=n(M);y(je.$$.fragment,ur),ms=p(ur),Ot=s(ur,"P",{});var Gl=n(Ot);us=l(Gl,"Decorator to make all processes in distributed training wait for each local_master to do something."),Gl.forEach(t),ur.forEach(t),tr=p(a),R=s(a,"H2",{class:!0});var gr=n(R);ae=s(gr,"A",{id:!0,class:!0,href:!0});var Vl=n(ae);At=s(Vl,"SPAN",{});var Ml=n(At);y(xe.$$.fragment,Ml),Ml.forEach(t),Vl.forEach(t),gs=p(gr),Ct=s(gr,"SPAN",{});var Rl=n(Ct);bs=l(Rl,"Callbacks internals"),Rl.forEach(t),gr.forEach(t),ar=p(a),q=s(a,"DIV",{class:!0});var br=n(q);y(ke.$$.fragment,br),_s=p(br),Lt=s(br,"P",{});var ql=n(Lt);vs=l(ql,"Internal class that just calls the list of callbacks in order."),ql.forEach(t),br.forEach(t),rr=p(a),B=s(a,"H2",{class:!0});var _r=n(B);re=s(_r,"A",{id:!0,class:!0,href:!0});var Bl=n(re);Ut=s(Bl,"SPAN",{});var Kl=n(Ut);y(Pe.$$.fragment,Kl),Kl.forEach(t),Bl.forEach(t),ws=p(_r),It=s(_r,"SPAN",{});var Yl=n(It);$s=l(Yl,"Distributed Evaluation"),Yl.forEach(t),_r.forEach(t),sr=p(a),v=s(a,"DIV",{class:!0});var P=n(v);y(Te.$$.fragment,P),ys=p(P),zt=s(P,"P",{});var Wl=n(zt);Es=l(Wl,"A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks."),Wl.forEach(t),Ds=p(P),Ht=s(P,"P",{});var Jl=n(Ht);js=l(Jl,`If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:`),Jl.forEach(t),xs=p(P),St=s(P,"P",{});var Ql=n(St);Nt=s(Ql,"CODE",{});var Xl=n(Nt);ks=l(Xl,"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]"),Xl.forEach(t),Ql.forEach(t),Ps=p(P),Ft=s(P,"P",{});var Zl=n(Ft);Ts=l(Zl,`to get something of size a multiple of 3 (so that each process gets the same dataset length). 
Then process 0, 1 and 2 will be responsible of making predictions for the following samples:`),Zl.forEach(t),Os=p(P),K=s(P,"UL",{});var ft=n(K);nt=s(ft,"LI",{});var wl=n(nt);As=l(wl,"P0: "),Gt=s(wl,"CODE",{});var ei=n(Gt);Cs=l(ei,"[0, 1, 2, 3, 4, 5]"),ei.forEach(t),wl.forEach(t),Ls=p(ft),ot=s(ft,"LI",{});var $l=n(ot);Us=l($l,"P1: "),Vt=s($l,"CODE",{});var ti=n(Vt);Is=l(ti,"[6, 7, 8, 9, 10, 11]"),ti.forEach(t),$l.forEach(t),zs=p(ft),lt=s(ft,"LI",{});var yl=n(lt);Hs=l(yl,"P2: "),Mt=s(yl,"CODE",{});var ai=n(Mt);Ss=l(ai,"[12, 13, 14, 15, 0, 1]"),ai.forEach(t),yl.forEach(t),ft.forEach(t),Ns=p(P),Rt=s(P,"P",{});var ri=n(Rt);Fs=l(ri,"The first batch treated on each process will be"),ri.forEach(t),Gs=p(P),Y=s(P,"UL",{});var mt=n(Y);it=s(mt,"LI",{});var El=n(it);Vs=l(El,"P0: "),qt=s(El,"CODE",{});var si=n(qt);Ms=l(si,"[0, 1]"),si.forEach(t),El.forEach(t),Rs=p(mt),pt=s(mt,"LI",{});var Dl=n(pt);qs=l(Dl,"P1: "),Bt=s(Dl,"CODE",{});var ni=n(Bt);Bs=l(ni,"[6, 7]"),ni.forEach(t),Dl.forEach(t),Ks=p(mt),ct=s(mt,"LI",{});var jl=n(ct);Ys=l(jl,"P2: "),Kt=s(jl,"CODE",{});var oi=n(Kt);Ws=l(oi,"[12, 13]"),oi.forEach(t),jl.forEach(t),mt.forEach(t),Js=p(P),Yt=s(P,"P",{});var li=n(Yt);Qs=l(li,`So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices:`),li.forEach(t),Xs=p(P),Wt=s(P,"P",{});var ii=n(Wt);Jt=s(ii,"CODE",{});var pi=n(Jt);Zs=l(pi,"[0, 1, 6, 7, 12, 13]"),pi.forEach(t),ii.forEach(t),en=p(P),Qt=s(P,"P",{});var ci=n(Qt);tn=l(ci,`If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop:`),ci.forEach(t),an=p(P),Xt=s(P,"P",{});var di=n(Xt);Zt=s(di,"CODE",{});var hi=n(Zt);rn=l(hi,"[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]"),hi.forEach(t),di.forEach(t),sn=p(P),ea=s(P,"P",{});var fi=n(ea);nn=l(fi,"For some reason, that\u2019s not going to roll their boat. This class is there to solve that problem."),fi.forEach(t),on=p(P),se=s(P,"DIV",{class:!0});var vr=n(se);y(Oe.$$.fragment,vr),ln=p(vr),Ae=s(vr,"P",{});var wr=n(Ae);pn=l(wr,"Add "),ta=s(wr,"CODE",{});var mi=n(ta);cn=l(mi,"arrays"),mi.forEach(t),dn=l(wr,` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we\u2019re bound to get an OOM, it happens at the beginning.`),wr.forEach(t),vr.forEach(t),hn=p(P),ne=s(P,"DIV",{class:!0});var $r=n(ne);y(Ce.$$.fragment,$r),fn=p($r),aa=s($r,"P",{});var ui=n(aa);mn=l(ui,`Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).`),ui.forEach(t),$r.forEach(t),P.forEach(t),nr=p(a),W=s(a,"H2",{class:!0});var yr=n(W);oe=s(yr,"A",{id:!0,class:!0,href:!0});var gi=n(oe);ra=s(gi,"SPAN",{});var bi=n(ra);y(Le.$$.fragment,bi),bi.forEach(t),gi.forEach(t),un=p(yr),sa=s(yr,"SPAN",{});var _i=n(sa);gn=l(_i,"Distributed Evaluation"),_i.forEach(t),yr.forEach(t),or=p(a),O=s(a,"DIV",{class:!0});var C=n(O);y(Ue.$$.fragment,C),bn=p(C),Ie=s(C,"P",{});var Er=n(Ie);_n=l(Er,"This subclass of "),na=s(Er,"CODE",{});var vi=n(na);vn=l(vi,"argparse.ArgumentParser"),vi.forEach(t),wn=l(Er," uses type hints on dataclasses to generate arguments."),Er.forEach(t),$n=p(C),ze=s(C,"P",{});var Dr=n(ze);yn=l(Dr,`The class is designed to play well with the native argparse. 
In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you\u2019ll get the output back after parsing as an additional namespace. Optional: To create sub argument groups use the `),oa=s(Dr,"CODE",{});var wi=n(oa);En=l(wi,"_argument_group_name"),wi.forEach(t),Dn=l(Dr," attribute in the dataclass."),Dr.forEach(t),jn=p(C),H=s(C,"DIV",{class:!0});var ut=n(H);y(He.$$.fragment,ut),xn=p(ut),la=s(ut,"P",{});var $i=n(la);kn=l($i,"Parse command-line args into instances of the specified dataclass types."),$i.forEach(t),Pn=p(ut),Se=s(ut,"P",{});var jr=n(Se);Tn=l(jr,"This relies on argparse\u2019s "),ia=s(jr,"CODE",{});var yi=n(ia);On=l(yi,"ArgumentParser.parse_known_args"),yi.forEach(t),An=l(jr,`. See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args`),jr.forEach(t),ut.forEach(t),Cn=p(C),le=s(C,"DIV",{class:!0});var xr=n(le);y(Ne.$$.fragment,xr),Ln=p(xr),Fe=s(xr,"P",{});var kr=n(Fe);Un=l(kr,"Alternative helper method that does not use "),pa=s(kr,"CODE",{});var Ei=n(pa);In=l(Ei,"argparse"),Ei.forEach(t),zn=l(kr,` at all, instead uses a dict and populating the dataclass types.`),kr.forEach(t),xr.forEach(t),Hn=p(C),ie=s(C,"DIV",{class:!0});var Pr=n(ie);y(Ge.$$.fragment,Pr),Sn=p(Pr),Ve=s(Pr,"P",{});var Tr=n(Ve);Nn=l(Tr,"Alternative helper method that does not use "),ca=s(Tr,"CODE",{});var Di=n(ca);Fn=l(Di,"argparse"),Di.forEach(t),Gn=l(Tr,` at all, instead loading a json file and populating the dataclass types.`),Tr.forEach(t),Pr.forEach(t),Vn=p(C),pe=s(C,"DIV",{class:!0});var Or=n(pe);y(Me.$$.fragment,Or),Mn=p(Or),Re=s(Or,"P",{});var Ar=n(Re);Rn=l(Ar,"Alternative helper method that does not use "),da=s(Ar,"CODE",{});var ji=n(da);qn=l(ji,"argparse"),ji.forEach(t),Bn=l(Ar,` at all, instead loading a json file and populating the dataclass types.`),Ar.forEach(t),Or.forEach(t),C.forEach(t),lr=p(a),J=s(a,"H2",{class:!0});var Cr=n(J);ce=s(Cr,"A",{id:!0,class:!0,href:!0});var xi=n(ce);ha=s(xi,"SPAN",{});var ki=n(ha);y(qe.$$.fragment,ki),ki.forEach(t),xi.forEach(t),Kn=p(Cr),fa=s(Cr,"SPAN",{});var Pi=n(fa);Yn=l(Pi,"Debug Utilities"),Pi.forEach(t),Cr.forEach(t),ir=p(a),d=s(a,"DIV",{class:!0});var h=n(d);y(Be.$$.fragment,h),Wn=p(h),Q=s(h,"P",{});var gt=n(Q);Jn=l(gt,`This debug class helps detect and understand where the model starts getting very large or very small, and more importantly `),ma=s(gt,"CODE",{});var Ti=n(ma);Qn=l(Ti,"nan"),Ti.forEach(t),Xn=l(gt," or "),ua=s(gt,"CODE",{});var Oi=n(ua);Zn=l(Oi,"inf"),Oi.forEach(t),eo=l(gt," weight and activation elements."),gt.forEach(t),to=p(h),ga=s(h,"P",{});var Ai=n(ga);ao=l(Ai,"There are 2 working modes:"),Ai.forEach(t),ro=p(h),Ke=s(h,"OL",{});var Lr=n(Ke);ba=s(Lr,"LI",{});var Ci=n(ba);so=l(Ci,"Underflow/overflow detection (default)"),Ci.forEach(t),no=p(Lr),_a=s(Lr,"LI",{});var Li=n(_a);oo=l(Li,"Specific batch absolute min/max tracing without detection"),Li.forEach(t),Lr.forEach(t),lo=p(h),va=s(h,"P",{});var Ui=n(va);io=l(Ui,"Mode 1: Underflow/overflow detection"),Ui.forEach(t),po=p(h),y(de.$$.fragment,h),co=p(h),z=s(h,"P",{});var ge=n(z);ho=l(ge,"then run the training as normal and if "),wa=s(ge,"CODE",{});var Ii=n(wa);fo=l(Ii,"nan"),Ii.forEach(t),mo=l(ge," or "),$a=s(ge,"CODE",{});var zi=n($a);uo=l(zi,"inf"),zi.forEach(t),go=l(ge,` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `),ya=s(ge,"CODE",{});var Hi=n(ya);bo=l(Hi,"max_frames_to_save"),Hi.forEach(t),_o=l(ge,` frames that lead to this event, each frame 
reporting`),ge.forEach(t),vo=p(h),Ye=s(h,"OL",{});var Ur=n(Ye);We=s(Ur,"LI",{});var Ir=n(We);wo=l(Ir,"the fully qualified module name plus the class name whose "),Ea=s(Ir,"CODE",{});var Si=n(Ea);$o=l(Si,"forward"),Si.forEach(t),yo=l(Ir," was run"),Ir.forEach(t),Eo=p(Ur),Da=s(Ur,"LI",{});var Ni=n(Da);Do=l(Ni,"the absolute min and max value of all elements for each module weights, and the inputs and output"),Ni.forEach(t),Ur.forEach(t),jo=p(h),Je=s(h,"P",{});var zr=n(Je);xo=l(zr,"For example, here is the header and the last few frames in detection report for "),ja=s(zr,"CODE",{});var Fi=n(ja);ko=l(Fi,"google/mt5-small"),Fi.forEach(t),Po=l(zr," run in fp16"),zr.forEach(t),To=p(h),y(he.$$.fragment,h),Oo=p(h),X=s(h,"P",{});var bt=n(X);Ao=l(bt,"You can see here, that "),xa=s(bt,"CODE",{});var Gi=n(xa);Co=l(Gi,"T5DenseGatedGeluDense.forward"),Gi.forEach(t),Lo=l(bt,` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),ka=s(bt,"CODE",{});var Vi=n(ka);Uo=l(Vi,"Dropout"),Vi.forEach(t),Io=l(bt,` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow.`),bt.forEach(t),zo=p(h),Pa=s(h,"P",{});var Mi=n(Pa);Ho=l(Mi,`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),Mi.forEach(t),So=p(h),Qe=s(h,"P",{});var Hr=n(Qe);No=l(Hr,"The tracking is done in a forward hook, which gets invoked immediately after "),Ta=s(Hr,"CODE",{});var Ri=n(Ta);Fo=l(Ri,"forward"),Ri.forEach(t),Go=l(Hr," has completed."),Hr.forEach(t),Vo=p(h),y(fe.$$.fragment,h),Mo=p(h),Oa=s(h,"P",{});var qi=n(Oa);Ro=l(qi,`To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section.`),qi.forEach(t),qo=p(h),Aa=s(h,"P",{});var Bi=n(Aa);Bo=l(Bi,"Mode 2. Specific batch absolute min/max tracing without detection"),Bi.forEach(t),Ko=p(h),Ca=s(h,"P",{});var Ki=n(Ca);Yo=l(Ki,"The second work mode is per-batch tracing with the underflow/overflow detection feature turned off."),Ki.forEach(t),Wo=p(h),Xe=s(h,"P",{});var Sr=n(Xe);Jo=l(Sr,"Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),La=s(Sr,"CODE",{});var Yi=n(La);Qo=l(Yi,"forward"),Yi.forEach(t),Xo=l(Sr," call of a"),Sr.forEach(t),Zo=p(h),y(me.$$.fragment,h),el=p(h),Ua=s(h,"P",{});var Wi=n(Ua);tl=l(Wi,"And now full batches 1 and 3 will be traced using the same format as explained above. 
Batches are 0-indexed."),Wi.forEach(t),al=p(h),Ia=s(h,"P",{});var Ji=n(Ia);rl=l(Ji,`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.`),Ji.forEach(t),sl=p(h),za=s(h,"P",{});var Qi=n(za);nl=l(Qi,"Early stopping:"),Qi.forEach(t),ol=p(h),y(ue.$$.fragment,h),ll=p(h),Ha=s(h,"P",{});var Xi=n(Ha);il=l(Xi,"This feature is mainly useful in the tracing mode, but you can use it for any mode."),Xi.forEach(t),pl=p(h),dt=s(h,"P",{});var xl=n(dt);Sa=s(xl,"STRONG",{});var Zi=n(Sa);cl=l(Zi,"Performance"),Zi.forEach(t),dl=l(xl,":"),xl.forEach(t),hl=p(h),Z=s(h,"P",{});var _t=n(Z);fl=l(_t,"As this module measures absolute "),Na=s(_t,"CODE",{});var ep=n(Na);ml=l(ep,"min"),ep.forEach(t),ul=l(_t,"/`"),Fa=s(_t,"CODE",{});var tp=n(Fa);gl=l(tp,"max"),tp.forEach(t),bl=l(_t,` of each weight of the model on every forward it\u2019ll slow the training down. Therefore remember to turn it off once the debugging needs have been met.`),_t.forEach(t),h.forEach(t),this.h()},h(){u(g,"name","hf:doc:metadata"),u(g,"content",JSON.stringify(fp)),u(f,"id","utilities-for-trainer"),u(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(f,"href","#utilities-for-trainer"),u(w,"class","relative group"),u(tt,"href","/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer"),u(te,"id","transformers.EvalPrediction"),u(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(te,"href","#transformers.EvalPrediction"),u(N,"class","relative group"),u(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ye,"href","https://pytorch.org/docs/stable/notes/randomness.html"),u(ye,"rel","nofollow"),u(Ee,"href","https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism"),u(Ee,"rel","nofollow"),u(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ae,"id","transformers.trainer_callback.CallbackHandler"),u(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ae,"href","#transformers.trainer_callback.CallbackHandler"),u(R,"class","relative group"),u(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(re,"id","transformers.trainer_pt_utils.DistributedTensorGatherer"),u(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(re,"href","#transformers.trainer_pt_utils.DistributedTensorGatherer"),u(B,"class","relative group"),u(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl 
mb-6 mt-8"),u(oe,"id","transformers.HfArgumentParser"),u(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(oe,"href","#transformers.HfArgumentParser"),u(W,"class","relative group"),u(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),u(ce,"id","transformers.debug_utils.DebugUnderflowOverflow"),u(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(ce,"href","#transformers.debug_utils.DebugUnderflowOverflow"),u(J,"class","relative group"),u(d,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(a,m){e(document.head,g),b(a,T,m),b(a,w,m),e(w,f),e(f,k),E(c,k,null),e(w,_),e(w,wt),e(wt,Nr),b(a,Ka,m),b(a,ee,m),e(ee,Fr),e(ee,tt),e(tt,Gr),e(ee,Vr),b(a,Ya,m),b(a,at,m),e(at,Mr),b(a,Wa,m),b(a,N,m),e(N,te),e(te,$t),E(be,$t,null),e(N,Rr),e(N,yt),e(yt,qr),b(a,Ja,m),b(a,F,m),E(_e,F,null),e(F,Br),e(F,Et),e(Et,Kr),b(a,Qa,m),b(a,G,m),E(ve,G,null),e(G,Yr),e(G,Dt),e(Dt,Wr),b(a,Xa,m),b(a,I,m),E(we,I,null),e(I,Jr),e(I,jt),e(jt,Qr),e(I,Xr),e(I,$e),e($e,rt),e(rt,ye),e(ye,Zr),e(rt,es),e($e,ts),e($e,st),e(st,Ee),e(Ee,as),e(st,rs),b(a,Za,m),b(a,V,m),E(De,V,null),e(V,ss),e(V,L),e(L,ns),e(L,xt),e(xt,os),e(L,ls),e(L,kt),e(kt,is),e(L,ps),e(L,Pt),e(Pt,cs),e(L,ds),e(L,Tt),e(Tt,hs),e(L,fs),b(a,er,m),b(a,M,m),E(je,M,null),e(M,ms),e(M,Ot),e(Ot,us),b(a,tr,m),b(a,R,m),e(R,ae),e(ae,At),E(xe,At,null),e(R,gs),e(R,Ct),e(Ct,bs),b(a,ar,m),b(a,q,m),E(ke,q,null),e(q,_s),e(q,Lt),e(Lt,vs),b(a,rr,m),b(a,B,m),e(B,re),e(re,Ut),E(Pe,Ut,null),e(B,ws),e(B,It),e(It,$s),b(a,sr,m),b(a,v,m),E(Te,v,null),e(v,ys),e(v,zt),e(zt,Es),e(v,Ds),e(v,Ht),e(Ht,js),e(v,xs),e(v,St),e(St,Nt),e(Nt,ks),e(v,Ps),e(v,Ft),e(Ft,Ts),e(v,Os),e(v,K),e(K,nt),e(nt,As),e(nt,Gt),e(Gt,Cs),e(K,Ls),e(K,ot),e(ot,Us),e(ot,Vt),e(Vt,Is),e(K,zs),e(K,lt),e(lt,Hs),e(lt,Mt),e(Mt,Ss),e(v,Ns),e(v,Rt),e(Rt,Fs),e(v,Gs),e(v,Y),e(Y,it),e(it,Vs),e(it,qt),e(qt,Ms),e(Y,Rs),e(Y,pt),e(pt,qs),e(pt,Bt),e(Bt,Bs),e(Y,Ks),e(Y,ct),e(ct,Ys),e(ct,Kt),e(Kt,Ws),e(v,Js),e(v,Yt),e(Yt,Qs),e(v,Xs),e(v,Wt),e(Wt,Jt),e(Jt,Zs),e(v,en),e(v,Qt),e(Qt,tn),e(v,an),e(v,Xt),e(Xt,Zt),e(Zt,rn),e(v,sn),e(v,ea),e(ea,nn),e(v,on),e(v,se),E(Oe,se,null),e(se,ln),e(se,Ae),e(Ae,pn),e(Ae,ta),e(ta,cn),e(Ae,dn),e(v,hn),e(v,ne),E(Ce,ne,null),e(ne,fn),e(ne,aa),e(aa,mn),b(a,nr,m),b(a,W,m),e(W,oe),e(oe,ra),E(Le,ra,null),e(W,un),e(W,sa),e(sa,gn),b(a,or,m),b(a,O,m),E(Ue,O,null),e(O,bn),e(O,Ie),e(Ie,_n),e(Ie,na),e(na,vn),e(Ie,wn),e(O,$n),e(O,ze),e(ze,yn),e(ze,oa),e(oa,En),e(ze,Dn),e(O,jn),e(O,H),E(He,H,null),e(H,xn),e(H,la),e(la,kn),e(H,Pn),e(H,Se),e(Se,Tn),e(Se,ia),e(ia,On),e(Se,An),e(O,Cn),e(O,le),E(Ne,le,null),e(le,Ln),e(le,Fe),e(Fe,Un),e(Fe,pa),e(pa,In),e(Fe,zn),e(O,Hn),e(O,ie),E(Ge,ie,null),e(ie,Sn),e(ie,Ve),e(Ve,Nn),e(Ve,ca),e(ca,Fn),e(Ve,Gn),e(O,Vn),e(O,pe),E(Me,pe,null),e(pe,Mn),e(pe,Re),e(Re,Rn),e(Re,da),e(da,qn),e(Re,Bn),b(a,lr,m),b(a,J,m),e(J,ce),e(ce,ha),E(qe,ha,null),e(J,Kn),e(J,fa),e(fa,Yn),b(a,i
r,m),b(a,d,m),E(Be,d,null),e(d,Wn),e(d,Q),e(Q,Jn),e(Q,ma),e(ma,Qn),e(Q,Xn),e(Q,ua),e(ua,Zn),e(Q,eo),e(d,to),e(d,ga),e(ga,ao),e(d,ro),e(d,Ke),e(Ke,ba),e(ba,so),e(Ke,no),e(Ke,_a),e(_a,oo),e(d,lo),e(d,va),e(va,io),e(d,po),E(de,d,null),e(d,co),e(d,z),e(z,ho),e(z,wa),e(wa,fo),e(z,mo),e(z,$a),e($a,uo),e(z,go),e(z,ya),e(ya,bo),e(z,_o),e(d,vo),e(d,Ye),e(Ye,We),e(We,wo),e(We,Ea),e(Ea,$o),e(We,yo),e(Ye,Eo),e(Ye,Da),e(Da,Do),e(d,jo),e(d,Je),e(Je,xo),e(Je,ja),e(ja,ko),e(Je,Po),e(d,To),E(he,d,null),e(d,Oo),e(d,X),e(X,Ao),e(X,xa),e(xa,Co),e(X,Lo),e(X,ka),e(ka,Uo),e(X,Io),e(d,zo),e(d,Pa),e(Pa,Ho),e(d,So),e(d,Qe),e(Qe,No),e(Qe,Ta),e(Ta,Fo),e(Qe,Go),e(d,Vo),E(fe,d,null),e(d,Mo),e(d,Oa),e(Oa,Ro),e(d,qo),e(d,Aa),e(Aa,Bo),e(d,Ko),e(d,Ca),e(Ca,Yo),e(d,Wo),e(d,Xe),e(Xe,Jo),e(Xe,La),e(La,Qo),e(Xe,Xo),e(d,Zo),E(me,d,null),e(d,el),e(d,Ua),e(Ua,tl),e(d,al),e(d,Ia),e(Ia,rl),e(d,sl),e(d,za),e(za,nl),e(d,ol),E(ue,d,null),e(d,ll),e(d,Ha),e(Ha,il),e(d,pl),e(d,dt),e(dt,Sa),e(Sa,cl),e(dt,dl),e(d,hl),e(d,Z),e(Z,fl),e(Z,Na),e(Na,ml),e(Z,ul),e(Z,Fa),e(Fa,gl),e(Z,bl),pr=!0},p(a,[m]){const Ze={};m&2&&(Ze.$$scope={dirty:m,ctx:a}),de.$set(Ze);const Ga={};m&2&&(Ga.$$scope={dirty:m,ctx:a}),he.$set(Ga);const Va={};m&2&&(Va.$$scope={dirty:m,ctx:a}),fe.$set(Va);const Ma={};m&2&&(Ma.$$scope={dirty:m,ctx:a}),me.$set(Ma);const et={};m&2&&(et.$$scope={dirty:m,ctx:a}),ue.$set(et)},i(a){pr||(D(c.$$.fragment,a),D(be.$$.fragment,a),D(_e.$$.fragment,a),D(ve.$$.fragment,a),D(we.$$.fragment,a),D(De.$$.fragment,a),D(je.$$.fragment,a),D(xe.$$.fragment,a),D(ke.$$.fragment,a),D(Pe.$$.fragment,a),D(Te.$$.fragment,a),D(Oe.$$.fragment,a),D(Ce.$$.fragment,a),D(Le.$$.fragment,a),D(Ue.$$.fragment,a),D(He.$$.fragment,a),D(Ne.$$.fragment,a),D(Ge.$$.fragment,a),D(Me.$$.fragment,a),D(qe.$$.fragment,a),D(Be.$$.fragment,a),D(de.$$.fragment,a),D(he.$$.fragment,a),D(fe.$$.fragment,a),D(me.$$.fragment,a),D(ue.$$.fragment,a),pr=!0)},o(a){j(c.$$.fragment,a),j(be.$$.fragment,a),j(_e.$$.fragment,a),j(ve.$$.fragment,a),j(we.$$.fragment,a),j(De.$$.fragment,a),j(je.$$.fragment,a),j(xe.$$.fragment,a),j(ke.$$.fragment,a),j(Pe.$$.fragment,a),j(Te.$$.fragment,a),j(Oe.$$.fragment,a),j(Ce.$$.fragment,a),j(Le.$$.fragment,a),j(Ue.$$.fragment,a),j(He.$$.fragment,a),j(Ne.$$.fragment,a),j(Ge.$$.fragment,a),j(Me.$$.fragment,a),j(qe.$$.fragment,a),j(Be.$$.fragment,a),j(de.$$.fragment,a),j(he.$$.fragment,a),j(fe.$$.fragment,a),j(me.$$.fragment,a),j(ue.$$.fragment,a),pr=!1},d(a){t(g),a&&t(T),a&&t(w),x(c),a&&t(Ka),a&&t(ee),a&&t(Ya),a&&t(at),a&&t(Wa),a&&t(N),x(be),a&&t(Ja),a&&t(F),x(_e),a&&t(Qa),a&&t(G),x(ve),a&&t(Xa),a&&t(I),x(we),a&&t(Za),a&&t(V),x(De),a&&t(er),a&&t(M),x(je),a&&t(tr),a&&t(R),x(xe),a&&t(ar),a&&t(q),x(ke),a&&t(rr),a&&t(B),x(Pe),a&&t(sr),a&&t(v),x(Te),x(Oe),x(Ce),a&&t(nr),a&&t(W),x(Le),a&&t(or),a&&t(O),x(Ue),x(He),x(Ne),x(Ge),x(Me),a&&t(lr),a&&t(J),x(qe),a&&t(ir),a&&t(d),x(Be),x(de),x(he),x(fe),x(me),x(ue)}}}const fp={local:"utilities-for-trainer",sections:[{local:"transformers.EvalPrediction",title:"Utilities"},{local:"transformers.trainer_callback.CallbackHandler",title:"Callbacks internals"},{local:"transformers.trainer_pt_utils.DistributedTensorGatherer",title:"Distributed Evaluation"},{local:"transformers.HfArgumentParser",title:"Distributed Evaluation"},{local:"transformers.debug_utils.DebugUnderflowOverflow",title:"Debug Utilities"}],title:"Utilities for Trainer"};function mp(U){return op(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class wp extends ap{constructor(g){super();rp(this,g,mp,hp,sp,{})}}export{wp as default,fp as metadata};
40
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/modeling_utils.mdx-hf-doc-builder.js
import{S as Si,i as Fi,s as Ai,e as r,k as l,w as h,t as a,M as Oi,c as s,d as o,m as d,a as n,x as g,h as i,b as f,G as t,g as m,y as _,q as v,o as b,B as y,v as Ii,L as Ni}from"../../chunks/vendor-hf-doc-builder.js";import{T as rr}from"../../chunks/Tip-hf-doc-builder.js";import{D as k}from"../../chunks/Docstring-hf-doc-builder.js";import{C as Hi}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as Jt}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Mi}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function Vi(S){let p,x,u,$,q,w,D,O,P,Q,F,C,B,A,z,G,L;return{c(){p=r("p"),x=a("One of "),u=r("code"),$=a("start_states"),q=a(" or "),w=r("code"),D=a("start_positions"),O=a(" should be not "),P=r("code"),Q=a("None"),F=a(". If both are set, "),C=r("code"),B=a("start_positions"),A=a(` overrides `),z=r("code"),G=a("start_states"),L=a(".")},l(E){p=s(E,"P",{});var T=n(p);x=i(T,"One of "),u=s(T,"CODE",{});var I=n(u);$=i(I,"start_states"),I.forEach(o),q=i(T," or "),w=s(T,"CODE",{});var ce=n(w);D=i(ce,"start_positions"),ce.forEach(o),O=i(T," should be not "),P=s(T,"CODE",{});var U=n(P);Q=i(U,"None"),U.forEach(o),F=i(T,". If both are set, "),C=s(T,"CODE",{});var pe=n(C);B=i(pe,"start_positions"),pe.forEach(o),A=i(T,` overrides `),z=s(T,"CODE",{});var ae=n(z);G=i(ae,"start_states"),ae.forEach(o),L=i(T,"."),T.forEach(o)},m(E,T){m(E,p,T),t(p,x),t(p,u),t(u,$),t(p,q),t(p,w),t(w,D),t(p,O),t(p,P),t(P,Q),t(p,F),t(p,C),t(C,B),t(p,A),t(p,z),t(z,G),t(p,L)},d(E){E&&o(p)}}}function ji(S){let p,x,u,$,q,w,D,O,P,Q,F,C,B,A,z,G,L;return{c(){p=r("p"),x=a("One of "),u=r("code"),$=a("start_states"),q=a(" or "),w=r("code"),D=a("start_positions"),O=a(" should be not "),P=r("code"),Q=a("None"),F=a(". If both are set, "),C=r("code"),B=a("start_positions"),A=a(` overrides `),z=r("code"),G=a("start_states"),L=a(".")},l(E){p=s(E,"P",{});var T=n(p);x=i(T,"One of "),u=s(T,"CODE",{});var I=n(u);$=i(I,"start_states"),I.forEach(o),q=i(T," or "),w=s(T,"CODE",{});var ce=n(w);D=i(ce,"start_positions"),ce.forEach(o),O=i(T," should be not "),P=s(T,"CODE",{});var U=n(P);Q=i(U,"None"),U.forEach(o),F=i(T,". 
If both are set, "),C=s(T,"CODE",{});var pe=n(C);B=i(pe,"start_positions"),pe.forEach(o),A=i(T,` overrides `),z=s(T,"CODE",{});var ae=n(z);G=i(ae,"start_states"),ae.forEach(o),L=i(T,"."),T.forEach(o)},m(E,T){m(E,p,T),t(p,x),t(p,u),t(u,$),t(p,q),t(p,w),t(w,D),t(p,O),t(p,P),t(P,Q),t(p,F),t(p,C),t(C,B),t(p,A),t(p,z),t(z,G),t(p,L)},d(E){E&&o(p)}}}function Qi(S){let p,x,u,$,q;return $=new Hi({props:{code:`# rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)`,highlighted:`<span class="hljs-comment"># rename the usual forward() fn to forward_chunk()</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward_chunk</span>(<span class="hljs-params">self, hidden_states</span>): hidden_states = self.decoder(hidden_states) <span class="hljs-keyword">return</span> hidden_states <span class="hljs-comment"># implement a chunked forward function</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): <span class="hljs-keyword">return</span> apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)`}}),{c(){p=r("p"),x=a("Examples:"),u=l(),h($.$$.fragment)},l(w){p=s(w,"P",{});var D=n(p);x=i(D,"Examples:"),D.forEach(o),u=d(w),g($.$$.fragment,w)},m(w,D){m(w,p,D),t(p,x),m(w,u,D),_($,w,D),q=!0},p:Ni,i(w){q||(v($.$$.fragment,w),q=!0)},o(w){b($.$$.fragment,w),q=!1},d(w){w&&o(p),w&&o(u),y($,w)}}}function Bi(S){let p,x;return{c(){p=r("p"),x=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=s(u,"P",{});var $=n(p);x=i($,"Any label of -100 will be ignored (along with the corresponding logits) in the loss computation."),$.forEach(o)},m(u,$){m(u,p,$),t(p,x)},d(u){u&&o(p)}}}function Gi(S){let p,x;return{c(){p=r("p"),x=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=s(u,"P",{});var $=n(p);x=i($,"Any label of -100 will be ignored (along with the corresponding logits) in the loss computation."),$.forEach(o)},m(u,$){m(u,p,$),t(p,x)},d(u){u&&o(p)}}}function Ui(S){let p,x;return{c(){p=r("p"),x=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=s(u,"P",{});var $=n(p);x=i($,"Any label of -100 will be ignored (along with the corresponding logits) in the loss computation."),$.forEach(o)},m(u,$){m(u,p,$),t(p,x)},d(u){u&&o(p)}}}function Ki(S){let 
p,x,u,$,q,w,D,O,P,Q,F,C,B,A,z,G,L,E,T,I,ce,U,pe,ae,K,Be,Yr,Yt,Zr,es,Zt,ts,sr,R,Ge,os,eo,rs,ss,Pt,Ue,nr,W,Ke,ns,to,as,is,De,Re,ls,Le,ar,X,We,ds,oo,cs,ps,Pe,Xe,ms,Ce,ir,me,Je,fs,Ye,us,Ct,hs,gs,lr,J,Ze,_s,ro,vs,bs,zt,et,dr,Y,tt,ys,so,$s,ws,ze,ot,Ts,no,ks,cr,fe,Se,ao,rt,xs,io,Es,pr,N,st,qs,H,Ds,lo,Ls,Ps,co,Cs,zs,po,Ss,Fs,mo,As,Os,Is,M,Ns,fo,Hs,Ms,uo,Vs,js,ho,Qs,Bs,go,Gs,Us,Ks,Fe,mr,ue,nt,Rs,at,Ws,_o,Xs,Js,fr,Z,it,Ys,vo,Zs,en,bo,tn,ur,ee,lt,on,yo,rn,sn,$o,nn,hr,te,dt,an,wo,ln,dn,To,cn,gr,he,Ae,ko,ct,pn,xo,mn,_r,oe,pt,fn,Eo,un,hn,qo,gn,vr,V,mt,_n,Do,vn,bn,Lo,yn,$n,ie,ft,wn,Po,Tn,kn,ut,xn,ht,En,qn,br,ge,gt,Dn,Co,Ln,yr,_e,Oe,zo,_t,Pn,So,Cn,$r,re,vt,zn,Fo,Sn,Fn,Ie,wr,se,bt,An,Ao,On,In,Ne,Tr,ve,yt,Nn,Oo,Hn,kr,be,$t,Mn,Io,Vn,xr,ye,wt,jn,No,Qn,Er,ne,Tt,Bn,Ho,Gn,Un,He,qr,$e,Me,Mo,kt,Kn,Vo,Rn,Dr,we,xt,Wn,Et,Xn,jo,Jn,Yn,Lr,j,qt,Zn,Qo,ea,ta,Bo,oa,ra,Te,ke,sa,Go,na,aa,Uo,ia,la,da,xe,ca,Ko,pa,ma,Ro,fa,ua,ha,Ee,ga,Wo,_a,va,Xo,ba,ya,Pr,qe,Dt,$a,Jo,wa,Cr;return w=new Jt({}),I=new Jt({}),Be=new k({props:{name:"class transformers.Conv1D",anchor:"transformers.Conv1D",parameters:[{name:"nf",val:""},{name:"nx",val:""}],parametersDescription:[{anchor:"transformers.Conv1D.nf",description:"<strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.",name:"nf"},{anchor:"transformers.Conv1D.nx",description:"<strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.",name:"nx"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L91"}}),Ge=new k({props:{name:"class transformers.modeling_utils.PoolerStartLogits",anchor:"transformers.modeling_utils.PoolerStartLogits",parameters:[{name:"config",val:": PretrainedConfig"}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerStartLogits.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2713"}}),Ue=new k({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerStartLogits.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerStartLogits.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerStartLogits.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.`,name:"p_mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2726",returnDescription:` <p>The start logits for SQuAD.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Ke=new k({props:{name:"class transformers.modeling_utils.PoolerEndLogits",anchor:"transformers.modeling_utils.PoolerEndLogits",parameters:[{name:"config",val:": PretrainedConfig"}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerEndLogits.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2751"}}),Re=new k({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerEndLogits.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.start_states",description:`<strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.`,name:"start_states"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.`,name:"p_mask"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2768",returnDescription:` <p>The end logits for SQuAD.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Le=new rr({props:{$$slots:{default:[Vi]},$$scope:{ctx:S}}}),We=new k({props:{name:"class transformers.modeling_utils.PoolerAnswerClass",anchor:"transformers.modeling_utils.PoolerAnswerClass",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerAnswerClass.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2820"}}),Xe=new k({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerAnswerClass.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.start_states",description:`<strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.`,name:"start_states"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. 
If <code>None</code>, takes the last token.`,name:"cls_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2835",returnDescription:` <p>The SQuAD 2.0 answer class.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Ce=new rr({props:{$$slots:{default:[ji]},$$scope:{ctx:S}}}),Je=new k({props:{name:"class transformers.modeling_utils.SquadHeadOutput",anchor:"transformers.modeling_utils.SquadHeadOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"end_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_logits",val:": typing.Optional[torch.FloatTensor] = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.SquadHeadOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.`,name:"loss"},{anchor:"transformers.modeling_utils.SquadHeadOutput.start_top_log_probs",description:`<strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_log_probs"},{anchor:"transformers.modeling_utils.SquadHeadOutput.start_top_index",description:`<strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_index"},{anchor:"transformers.modeling_utils.SquadHeadOutput.end_top_log_probs",description:`<strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_log_probs"},{anchor:"transformers.modeling_utils.SquadHeadOutput.end_top_index",description:`<strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_index"},{anchor:"transformers.modeling_utils.SquadHeadOutput.cls_logits",description:`<strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the 
answers.`,name:"cls_logits"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2886"}}),Ze=new k({props:{name:"class transformers.modeling_utils.SQuADHead",anchor:"transformers.modeling_utils.SQuADHead",parameters:[{name:"config",val:""}],parametersDescription:[{anchor:"transformers.modeling_utils.SQuADHead.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2916"}}),et=new k({props:{name:"forward",anchor:"transformers.modeling_utils.SQuADHead.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"is_impossible",val:": typing.Optional[torch.LongTensor] = None"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"return_dict",val:": bool = False"}],parametersDescription:[{anchor:"transformers.modeling_utils.SQuADHead.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; Final hidden states of the model on the sequence tokens.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.SQuADHead.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.SQuADHead.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the last token for the labeled span.`,name:"end_positions"},{anchor:"transformers.modeling_utils.SQuADHead.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. If <code>None</code>, takes the last token.`,name:"cls_index"},{anchor:"transformers.modeling_utils.SQuADHead.forward.is_impossible",description:`<strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Whether the question has a possible answer in the paragraph or not.`,name:"is_impossible"},{anchor:"transformers.modeling_utils.SQuADHead.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.`,name:"p_mask"},{anchor:"transformers.modeling_utils.SQuADHead.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2935",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.configuration_utils.PretrainedConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</li> <li><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the <code>is_impossible</code> label of the answers.</li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),tt=new k({props:{name:"class transformers.modeling_utils.SequenceSummary",anchor:"transformers.modeling_utils.SequenceSummary",parameters:[{name:"config",val:": 
PretrainedConfig"}],parametersDescription:[{anchor:"transformers.modeling_utils.SequenceSummary.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul>`,name:"config"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L3033"}}),ot=new k({props:{name:"forward",anchor:"transformers.modeling_utils.SequenceSummary.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = None"}],parametersDescription:[{anchor:"transformers.modeling_utils.SequenceSummary.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>[batch_size, seq_len, hidden_size]</code>) &#x2014; The hidden states of the last layer.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.SequenceSummary.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>[batch_size]</code> or <code>[batch_size, ...]</code> where &#x2026; are optional leading dimensions of <code>hidden_states</code>, <em>optional</em>) &#x2014; Used if <code>summary_type == &quot;cls_index&quot;</code> and takes the last token of the sequence as classification token.`,name:"cls_index"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L3088",returnDescription:` <p>The summary of the sequence hidden states.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),rt=new Jt({}),st=new k({props:{name:"transformers.apply_chunking_to_forward",anchor:"transformers.apply_chunking_to_forward",parameters:[{name:"forward_fn",val:": typing.Callable[..., torch.Tensor]"},{name:"chunk_size",val:": 
int"},{name:"chunk_dim",val:": int"},{name:"*input_tensors",val:""}],parametersDescription:[{anchor:"transformers.apply_chunking_to_forward.forward_fn",description:`<strong>forward_fn</strong> (<code>Callable[..., torch.Tensor]</code>) &#x2014; The forward function of the model.`,name:"forward_fn"},{anchor:"transformers.apply_chunking_to_forward.chunk_size",description:`<strong>chunk_size</strong> (<code>int</code>) &#x2014; The chunk size of a chunked tensor: <code>num_chunks = len(input_tensors[0]) / chunk_size</code>.`,name:"chunk_size"},{anchor:"transformers.apply_chunking_to_forward.chunk_dim",description:`<strong>chunk_dim</strong> (<code>int</code>) &#x2014; The dimension over which the <code>input_tensors</code> should be chunked.`,name:"chunk_dim"},{anchor:"transformers.apply_chunking_to_forward.input_tensors",description:`<strong>input_tensors</strong> (<code>Tuple[torch.Tensor]</code>) &#x2014; The input tensors of <code>forward_fn</code> which will be chunked`,name:"input_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L174",returnDescription:` <p>A tensor with the same shape as the <code>forward_fn</code> would have given if applied\`.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),Fe=new Mi({props:{anchor:"transformers.apply_chunking_to_forward.example",$$slots:{default:[Qi]},$$scope:{ctx:S}}}),nt=new k({props:{name:"transformers.pytorch_utils.find_pruneable_heads_and_indices",anchor:"transformers.pytorch_utils.find_pruneable_heads_and_indices",parameters:[{name:"heads",val:": typing.List[int]"},{name:"n_heads",val:": int"},{name:"head_size",val:": int"},{name:"already_pruned_heads",val:": typing.Set[int]"}],parametersDescription:[{anchor:"transformers.pytorch_utils.find_pruneable_heads_and_indices.heads",description:"<strong>heads</strong> (<code>List[int]</code>) &#x2014; List of the indices of heads to prune.",name:"heads"},{anchor:"transformers.pytorch_utils.find_pruneable_heads_and_indices.n_heads",description:"<strong>n_heads</strong> (<code>int</code>) &#x2014; The number of heads in the model.",name:"n_heads"},{anchor:"transformers.pytorch_utils.find_pruneable_heads_and_indices.head_size",description:"<strong>head_size</strong> (<code>int</code>) &#x2014; The size of each head.",name:"head_size"},{anchor:"transformers.pytorch_utils.find_pruneable_heads_and_indices.already_pruned_heads",description:"<strong>already_pruned_heads</strong> (<code>Set[int]</code>) &#x2014; A set of already pruned heads.",name:"already_pruned_heads"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L249",returnDescription:` <p>A tuple with the remaining heads and their corresponding indices.</p> `,returnType:` <p><code>Tuple[Set[int], torch.LongTensor]</code></p> `}}),it=new k({props:{name:"transformers.prune_layer",anchor:"transformers.prune_layer",parameters:[{name:"layer",val:": typing.Union[torch.nn.modules.linear.Linear, transformers.pytorch_utils.Conv1D]"},{name:"index",val:": LongTensor"},{name:"dim",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"transformers.prune_layer.layer",description:"<strong>layer</strong> (<code>Union[torch.nn.Linear, Conv1D]</code>) &#x2014; The layer to prune.",name:"layer"},{anchor:"transformers.prune_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.prune_layer.dim",description:"<strong>dim</strong> 
(<code>int</code>, <em>optional</em>) &#x2014; The dimension on which to keep the indices.",name:"dim"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L150",returnDescription:` <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> `,returnType:` <p><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> `}}),lt=new k({props:{name:"transformers.pytorch_utils.prune_conv1d_layer",anchor:"transformers.pytorch_utils.prune_conv1d_layer",parameters:[{name:"layer",val:": Conv1D"},{name:"index",val:": LongTensor"},{name:"dim",val:": int = 1"}],parametersDescription:[{anchor:"transformers.pytorch_utils.prune_conv1d_layer.layer",description:'<strong>layer</strong> (<a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D">Conv1D</a>) &#x2014; The layer to prune.',name:"layer"},{anchor:"transformers.pytorch_utils.prune_conv1d_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.pytorch_utils.prune_conv1d_layer.dim",description:"<strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The dimension on which to keep the indices.",name:"dim"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L117",returnDescription:` <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> `}}),dt=new k({props:{name:"transformers.pytorch_utils.prune_linear_layer",anchor:"transformers.pytorch_utils.prune_linear_layer",parameters:[{name:"layer",val:": Linear"},{name:"index",val:": LongTensor"},{name:"dim",val:": int = 0"}],parametersDescription:[{anchor:"transformers.pytorch_utils.prune_linear_layer.layer",description:"<strong>layer</strong> (<code>torch.nn.Linear</code>) &#x2014; The layer to prune.",name:"layer"},{anchor:"transformers.pytorch_utils.prune_linear_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.pytorch_utils.prune_linear_layer.dim",description:"<strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The dimension on which to keep the indices.",name:"dim"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L57",returnDescription:` <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> `,returnType:` <p><code>torch.nn.Linear</code></p> `}}),ct=new Jt({}),pt=new k({props:{name:"class transformers.modeling_tf_utils.TFConv1D",anchor:"transformers.modeling_tf_utils.TFConv1D",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.modeling_tf_utils.TFConv1D.nf",description:`<strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.`,name:"nf"},{anchor:"transformers.modeling_tf_utils.TFConv1D.nx",description:`<strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.`,name:"nx"},{anchor:"transformers.modeling_tf_utils.TFConv1D.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. 
kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2759"}}),mt=new k({props:{name:"class transformers.TFSharedEmbeddings",anchor:"transformers.TFSharedEmbeddings",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFSharedEmbeddings.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary, e.g., the number of unique tokens.`,name:"vocab_size"},{anchor:"transformers.TFSharedEmbeddings.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>) &#x2014; The size of the embedding vectors.`,name:"hidden_size"},{anchor:"transformers.TFSharedEmbeddings.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>) &#x2014; The standard deviation to use when initializing the weights. If no value is provided, it will default to {@html &quot;<span class="\\&quot;katex\\&quot;"><span class="\\&quot;katex-mathml\\&quot;"><math xmlns="\\&quot;http://www.w3.org/1998/Math/MathML\\&quot;"><semantics><mrow><mn>1</mn><mi mathvariant="\\&quot;normal\\&quot;">/</mi><msqrt><mrow><mi>h</mi><mi>i</mi><mi>d</mi><mi>d</mi><mi>e</mi><mi>n</mi><mi mathvariant="\\&quot;normal\\&quot;">_</mi><mi>s</mi><mi>i</mi><mi>z</mi><mi>e</mi></mrow></msqrt></mrow><annotation encoding="\\&quot;application/x-tex\\&quot;">1/\\\\sqrt{hidden\\\\_size}</annotation></semantics></math></span><span class="\\&quot;katex-html\\&quot;" aria-hidden="\\&quot;true\\&quot;"><span class="\\&quot;base\\&quot;"><span class="\\&quot;strut\\&quot;" style="\\&quot;height:1.24em;vertical-align:-0.3628em;\\&quot;"></span><span class="\\&quot;mord\\&quot;">1/</span><span class="\\&quot;mord" sqrt\\"><span class="\\&quot;vlist-t" vlist-t2\\"><span class="\\&quot;vlist-r\\&quot;"><span class="\\&quot;vlist\\&quot;" style="\\&quot;height:0.8772em;\\&quot;"><span class="\\&quot;svg-align\\&quot;" style="\\&quot;top:-3.2em;\\&quot;"><span class="\\&quot;pstrut\\&quot;" style="\\&quot;height:3.2em;\\&quot;"></span><span class="\\&quot;mord\\&quot;" style="\\&quot;padding-left:1em;\\&quot;"><span class="\\&quot;mord" mathnormal\\">hi</span><span class="\\&quot;mord" mathnormal\\">dd</span><span class="\\&quot;mord" mathnormal\\">e</span><span class="\\&quot;mord" mathnormal\\">n</span><span class="\\&quot;mord\\&quot;" style="\\&quot;margin-right:0.02778em;\\&quot;">_</span><span class="\\&quot;mord" mathnormal\\">s</span><span class="\\&quot;mord" mathnormal\\">i</span><span class="\\&quot;mord" mathnormal\\">ze</span></span></span><span style="\\&quot;top:-2.8372em;\\&quot;"><span class="\\&quot;pstrut\\&quot;" style="\\&quot;height:3.2em;\\&quot;"></span><span class="\\&quot;hide-tail\\&quot;" style="\\&quot;min-width:1.02em;height:1.28em;\\&quot;"><svg xmlns="\\&quot;http://www.w3.org/2000/svg\\&quot;" width="400em" height="1.28em" viewBox="0 0 400000 1296" preserveAspectRatio="xMinYMin slice"><path d="M263,681c0.7,0,18,39.7,52,119\\nc34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120\\nc340,-704.7,510.7,-1060.3,512,-1067\\nl0 
-0\\nc4.7,-7.3,11,-11,19,-11\\nH40000v40H1012.3\\ns-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232\\nc-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1\\ns-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26\\nc-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z\\nM1001 80h400000v40h-400000z"/></svg></span></span></span><span class="\\&quot;vlist-s\\&quot;">&#x200B;</span></span><span class="\\&quot;vlist-r\\&quot;"><span class="\\&quot;vlist\\&quot;" style="\\&quot;height:0.3628em;\\&quot;"><span></span></span></span></span></span></span></span></span>&quot;}. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2799"}}),ft=new k({props:{name:"call",anchor:"transformers.TFSharedEmbeddings.call",parameters:[{name:"inputs",val:": Tensor"},{name:"mode",val:": str = 'embedding'"}],parametersDescription:[{anchor:"transformers.TFSharedEmbeddings.call.inputs",description:`<strong>inputs</strong> (<code>tf.Tensor</code>) &#x2014; In embedding mode, should be an int64 tensor with shape <code>[batch_size, length]</code>.</p> <p>In linear mode, should be a float tensor with shape <code>[batch_size, length, hidden_size]</code>.`,name:"inputs"},{anchor:"transformers.TFSharedEmbeddings.call.mode",description:`<strong>mode</strong> (<code>str</code>, defaults to <code>&quot;embedding&quot;</code>) &#x2014; A valid value is either <code>&quot;embedding&quot;</code> or <code>&quot;linear&quot;</code>, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder.`,name:"mode"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2845",returnDescription:` <p>In embedding mode, the output is a float32 embedding tensor, with shape <code>[batch_size, length, embedding_size]</code>.</p> <p>In linear mode, the output is a float32 with shape <code>[batch_size, length, vocab_size]</code>.</p> `,returnType:` <p><code>tf.Tensor</code></p> `,raiseDescription:` <ul> <li><code>ValueError</code> \u2014 if <code>mode</code> is not valid.</li> </ul> `,raiseType:` <p><code>ValueError</code></p> `}}),gt=new k({props:{name:"class transformers.TFSequenceSummary",anchor:"transformers.TFSequenceSummary",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.TFSequenceSummary.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. 
Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul>`,name:"config"},{anchor:"transformers.TFSequenceSummary.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2898"}}),_t=new Jt({}),vt=new k({props:{name:"class transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",anchor:"transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L179"}}),Ie=new rr({props:{$$slots:{default:[Bi]},$$scope:{ctx:S}}}),bt=new k({props:{name:"class transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss",anchor:"transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L298"}}),Ne=new rr({props:{$$slots:{default:[Gi]},$$scope:{ctx:S}}}),yt=new k({props:{name:"class transformers.modeling_tf_utils.TFMultipleChoiceLoss",anchor:"transformers.modeling_tf_utils.TFMultipleChoiceLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L288"}}),$t=new k({props:{name:"class transformers.modeling_tf_utils.TFQuestionAnsweringLoss",anchor:"transformers.modeling_tf_utils.TFQuestionAnsweringLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L210"}}),wt=new k({props:{name:"class transformers.modeling_tf_utils.TFSequenceClassificationLoss",anchor:"transformers.modeling_tf_utils.TFSequenceClassificationLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L269"}}),Tt=new k({props:{name:"class 
transformers.modeling_tf_utils.TFTokenClassificationLoss",anchor:"transformers.modeling_tf_utils.TFTokenClassificationLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L225"}}),He=new rr({props:{$$slots:{default:[Ui]},$$scope:{ctx:S}}}),kt=new Jt({}),xt=new k({props:{name:"transformers.modeling_tf_utils.get_initializer",anchor:"transformers.modeling_tf_utils.get_initializer",parameters:[{name:"initializer_range",val:": float = 0.02"}],parametersDescription:[{anchor:"transformers.modeling_tf_utils.get_initializer.initializer_range",description:"<strong>initializer_range</strong> (<em>float</em>, defaults to 0.02) &#x2014; Standard deviation of the initializer range.",name:"initializer_range"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L3014",returnDescription:` <p>The truncated normal initializer.</p> `,returnType:` <p><code>tf.initializers.TruncatedNormal</code></p> `}}),qt=new k({props:{name:"transformers.modeling_tf_utils.keras_serializable",anchor:"transformers.modeling_tf_utils.keras_serializable",parameters:[],parametersDescription:[{anchor:"transformers.modeling_tf_utils.keras_serializable.cls",description:`<strong>cls</strong> (a <code>tf.keras.layers.Layers subclass</code>) &#x2014; Typically a <code>TF.MainLayer</code> class in this project, in general must accept a <code>config</code> argument to its initializer.`,name:"cls"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L114",returnDescription:` <p>The same class object, with modifications for Keras deserialization.</p> `}}),Dt=new k({props:{name:"transformers.shape_list",anchor:"transformers.shape_list",parameters:[{name:"tensor",val:": typing.Union[tensorflow.python.framework.ops.Tensor, numpy.ndarray]"}],parametersDescription:[{anchor:"transformers.shape_list.tensor",description:"<strong>tensor</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code>) &#x2014; The tensor we want the shape of.",name:"tensor"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tf_utils.py#L26",returnDescription:` <p>The shape of the tensor as a list.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){p=r("meta"),x=l(),u=r("h1"),$=r("a"),q=r("span"),h(w.$$.fragment),D=l(),O=r("span"),P=a("Custom Layers and Utilities"),Q=l(),F=r("p"),C=a("This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling."),B=l(),A=r("p"),z=a("Most of those are only useful if you are studying the code of the models in the library."),G=l(),L=r("h2"),E=r("a"),T=r("span"),h(I.$$.fragment),ce=l(),U=r("span"),pe=a("Pytorch custom modules"),ae=l(),K=r("div"),h(Be.$$.fragment),Yr=l(),Yt=r("p"),Zr=a("1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2)."),es=l(),Zt=r("p"),ts=a("Basically works like a linear layer but the weights are transposed."),sr=l(),R=r("div"),h(Ge.$$.fragment),os=l(),eo=r("p"),rs=a("Compute SQuAD start logits from sequence hidden states."),ss=l(),Pt=r("div"),h(Ue.$$.fragment),nr=l(),W=r("div"),h(Ke.$$.fragment),ns=l(),to=r("p"),as=a("Compute SQuAD end logits from sequence hidden states."),is=l(),De=r("div"),h(Re.$$.fragment),ls=l(),h(Le.$$.fragment),ar=l(),X=r("div"),h(We.$$.fragment),ds=l(),oo=r("p"),cs=a("Compute SQuAD 2.0 answer class from classification and start tokens hidden states."),ps=l(),Pe=r("div"),h(Xe.$$.fragment),ms=l(),h(Ce.$$.fragment),ir=l(),me=r("div"),h(Je.$$.fragment),fs=l(),Ye=r("p"),us=a("Base class for outputs of question answering models using a "),Ct=r("a"),hs=a("SQuADHead"),gs=a("."),lr=l(),J=r("div"),h(Ze.$$.fragment),_s=l(),ro=r("p"),vs=a("A SQuAD head inspired by XLNet."),bs=l(),zt=r("div"),h(et.$$.fragment),dr=l(),Y=r("div"),h(tt.$$.fragment),ys=l(),so=r("p"),$s=a("Compute a single vector summary of a sequence hidden states."),ws=l(),ze=r("div"),h(ot.$$.fragment),Ts=l(),no=r("p"),ks=a("Compute a single vector summary of a sequence hidden states."),cr=l(),fe=r("h2"),Se=r("a"),ao=r("span"),h(rt.$$.fragment),xs=l(),io=r("span"),Es=a("PyTorch Helper Functions"),pr=l(),N=r("div"),h(st.$$.fragment),qs=l(),H=r("p"),Ds=a("This function chunks the "),lo=r("code"),Ls=a("input_tensors"),Ps=a(" into smaller input tensor parts of size "),co=r("code"),Cs=a("chunk_size"),zs=a(` over the dimension `),po=r("code"),Ss=a("chunk_dim"),Fs=a(". It then applies a layer "),mo=r("code"),As=a("forward_fn"),Os=a(" to each chunk independently to save memory."),Is=l(),M=r("p"),Ns=a("If the "),fo=r("code"),Hs=a("forward_fn"),Ms=a(" is independent across the "),uo=r("code"),Vs=a("chunk_dim"),js=a(` this function will yield the same result as directly applying `),ho=r("code"),Qs=a("forward_fn"),Bs=a(" to "),go=r("code"),Gs=a("input_tensors"),Us=a("."),Ks=l(),h(Fe.$$.fragment),mr=l(),ue=r("div"),h(nt.$$.fragment),Rs=l(),at=r("p"),Ws=a("Finds the heads and their indices taking "),_o=r("code"),Xs=a("already_pruned_heads"),Js=a(" into account."),fr=l(),Z=r("div"),h(it.$$.fragment),Ys=l(),vo=r("p"),Zs=a("Prune a Conv1D or linear layer to keep only entries in index."),en=l(),bo=r("p"),tn=a("Used to remove heads."),ur=l(),ee=r("div"),h(lt.$$.fragment),on=l(),yo=r("p"),rn=a(`Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.`),sn=l(),$o=r("p"),nn=a("Used to remove heads."),hr=l(),te=r("div"),h(dt.$$.fragment),an=l(),wo=r("p"),ln=a("Prune a linear layer to keep only entries in index."),dn=l(),To=r("p"),cn=a("Used to remove heads."),gr=l(),he=r("h2"),Ae=r("a"),ko=r("span"),h(ct.$$.fragment),pn=l(),xo=r("span"),mn=a("TensorFlow custom layers"),_r=l(),oe=r("div"),h(pt.$$.fragment),fn=l(),Eo=r("p"),un=a("1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2)."),hn=l(),qo=r("p"),gn=a("Basically works like a linear layer but the weights are transposed."),vr=l(),V=r("div"),h(mt.$$.fragment),_n=l(),Do=r("p"),vn=a("Construct shared token embeddings."),bn=l(),Lo=r("p"),yn=a(`The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.`),$n=l(),ie=r("div"),h(ft.$$.fragment),wn=l(),Po=r("p"),Tn=a("Get token embeddings of inputs or decode final hidden state."),kn=l(),ut=r("p"),xn=a(`Shared weights logic is adapted from `),ht=r("a"),En=a("here"),qn=a("."),br=l(),ge=r("div"),h(gt.$$.fragment),Dn=l(),Co=r("p"),Ln=a("Compute a single vector summary of a sequence hidden states."),yr=l(),_e=r("h2"),Oe=r("a"),zo=r("span"),h(_t.$$.fragment),Pn=l(),So=r("span"),Cn=a("TensorFlow loss functions"),$r=l(),re=r("div"),h(vt.$$.fragment),zn=l(),Fo=r("p"),Sn=a("Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token."),Fn=l(),h(Ie.$$.fragment),wr=l(),se=r("div"),h(bt.$$.fragment),An=l(),Ao=r("p"),On=a("Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens."),In=l(),h(Ne.$$.fragment),Tr=l(),ve=r("div"),h(yt.$$.fragment),Nn=l(),Oo=r("p"),Hn=a("Loss function suitable for multiple choice tasks."),kr=l(),be=r("div"),h($t.$$.fragment),Mn=l(),Io=r("p"),Vn=a("Loss function suitable for question answering."),xr=l(),ye=r("div"),h(wt.$$.fragment),jn=l(),No=r("p"),Qn=a("Loss function suitable for sequence classification."),Er=l(),ne=r("div"),h(Tt.$$.fragment),Bn=l(),Ho=r("p"),Gn=a("Loss function suitable for token classification."),Un=l(),h(He.$$.fragment),qr=l(),$e=r("h2"),Me=r("a"),Mo=r("span"),h(kt.$$.fragment),Kn=l(),Vo=r("span"),Rn=a("TensorFlow Helper Functions"),Dr=l(),we=r("div"),h(xt.$$.fragment),Wn=l(),Et=r("p"),Xn=a("Creates a "),jo=r("code"),Jn=a("tf.initializers.TruncatedNormal"),Yn=a(" with the given range."),Lr=l(),j=r("div"),h(qt.$$.fragment),Zn=l(),Qo=r("p"),ea=a("Decorate a Keras Layer class to support Keras serialization."),ta=l(),Bo=r("p"),oa=a("This is done by:"),ra=l(),Te=r("ol"),ke=r("li"),sa=a("Adding a "),Go=r("code"),na=a("transformers_config"),aa=a(" dict to the Keras config dictionary in "),Uo=r("code"),ia=a("get_config"),la=a(` (called by Keras at serialization time.`),da=l(),xe=r("li"),ca=a("Wrapping "),Ko=r("code"),pa=a("__init__"),ma=a(" to accept that "),Ro=r("code"),fa=a("transformers_config"),ua=a(` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.`),ha=l(),Ee=r("li"),ga=a(`Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `),Wo=r("code"),_a=a("custom_objects"),va=a(" in the call to "),Xo=r("code"),ba=a("tf.keras.models.load_model"),ya=a("."),Pr=l(),qe=r("div"),h(Dt.$$.fragment),$a=l(),Jo=r("p"),wa=a("Deal with dynamic shape in tensorflow cleanly."),this.h()},l(e){const c=Oi('[data-svelte="svelte-1phssyn"]',document.head);p=s(c,"META",{name:!0,content:!0}),c.forEach(o),x=d(e),u=s(e,"H1",{class:!0});var Lt=n(u);$=s(Lt,"A",{id:!0,class:!0,href:!0});var Yo=n($);q=s(Yo,"SPAN",{});var Zo=n(q);g(w.$$.fragment,Zo),Zo.forEach(o),Yo.forEach(o),D=d(Lt),O=s(Lt,"SPAN",{});var er=n(O);P=i(er,"Custom Layers and Utilities"),er.forEach(o),Lt.forEach(o),Q=d(e),F=s(e,"P",{});var tr=n(F);C=i(tr,"This page lists all the custom layers used by the library, as well as the utility functions it provides for 
modeling."),tr.forEach(o),B=d(e),A=s(e,"P",{});var or=n(A);z=i(or,"Most of those are only useful if you are studying the code of the models in the library."),or.forEach(o),G=d(e),L=s(e,"H2",{class:!0});var zr=n(L);E=s(zr,"A",{id:!0,class:!0,href:!0});var Ta=n(E);T=s(Ta,"SPAN",{});var ka=n(T);g(I.$$.fragment,ka),ka.forEach(o),Ta.forEach(o),ce=d(zr),U=s(zr,"SPAN",{});var xa=n(U);pe=i(xa,"Pytorch custom modules"),xa.forEach(o),zr.forEach(o),ae=d(e),K=s(e,"DIV",{class:!0});var St=n(K);g(Be.$$.fragment,St),Yr=d(St),Yt=s(St,"P",{});var Ea=n(Yt);Zr=i(Ea,"1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)."),Ea.forEach(o),es=d(St),Zt=s(St,"P",{});var qa=n(Zt);ts=i(qa,"Basically works like a linear layer but the weights are transposed."),qa.forEach(o),St.forEach(o),sr=d(e),R=s(e,"DIV",{class:!0});var Ft=n(R);g(Ge.$$.fragment,Ft),os=d(Ft),eo=s(Ft,"P",{});var Da=n(eo);rs=i(Da,"Compute SQuAD start logits from sequence hidden states."),Da.forEach(o),ss=d(Ft),Pt=s(Ft,"DIV",{class:!0});var La=n(Pt);g(Ue.$$.fragment,La),La.forEach(o),Ft.forEach(o),nr=d(e),W=s(e,"DIV",{class:!0});var At=n(W);g(Ke.$$.fragment,At),ns=d(At),to=s(At,"P",{});var Pa=n(to);as=i(Pa,"Compute SQuAD end logits from sequence hidden states."),Pa.forEach(o),is=d(At),De=s(At,"DIV",{class:!0});var Sr=n(De);g(Re.$$.fragment,Sr),ls=d(Sr),g(Le.$$.fragment,Sr),Sr.forEach(o),At.forEach(o),ar=d(e),X=s(e,"DIV",{class:!0});var Ot=n(X);g(We.$$.fragment,Ot),ds=d(Ot),oo=s(Ot,"P",{});var Ca=n(oo);cs=i(Ca,"Compute SQuAD 2.0 answer class from classification and start tokens hidden states."),Ca.forEach(o),ps=d(Ot),Pe=s(Ot,"DIV",{class:!0});var Fr=n(Pe);g(Xe.$$.fragment,Fr),ms=d(Fr),g(Ce.$$.fragment,Fr),Fr.forEach(o),Ot.forEach(o),ir=d(e),me=s(e,"DIV",{class:!0});var Ar=n(me);g(Je.$$.fragment,Ar),fs=d(Ar),Ye=s(Ar,"P",{});var Or=n(Ye);us=i(Or,"Base class for outputs of question answering models using a "),Ct=s(Or,"A",{href:!0});var za=n(Ct);hs=i(za,"SQuADHead"),za.forEach(o),gs=i(Or,"."),Or.forEach(o),Ar.forEach(o),lr=d(e),J=s(e,"DIV",{class:!0});var It=n(J);g(Ze.$$.fragment,It),_s=d(It),ro=s(It,"P",{});var Sa=n(ro);vs=i(Sa,"A SQuAD head inspired by XLNet."),Sa.forEach(o),bs=d(It),zt=s(It,"DIV",{class:!0});var Fa=n(zt);g(et.$$.fragment,Fa),Fa.forEach(o),It.forEach(o),dr=d(e),Y=s(e,"DIV",{class:!0});var Nt=n(Y);g(tt.$$.fragment,Nt),ys=d(Nt),so=s(Nt,"P",{});var Aa=n(so);$s=i(Aa,"Compute a single vector summary of a sequence hidden states."),Aa.forEach(o),ws=d(Nt),ze=s(Nt,"DIV",{class:!0});var Ir=n(ze);g(ot.$$.fragment,Ir),Ts=d(Ir),no=s(Ir,"P",{});var Oa=n(no);ks=i(Oa,"Compute a single vector summary of a sequence hidden states."),Oa.forEach(o),Ir.forEach(o),Nt.forEach(o),cr=d(e),fe=s(e,"H2",{class:!0});var Nr=n(fe);Se=s(Nr,"A",{id:!0,class:!0,href:!0});var Ia=n(Se);ao=s(Ia,"SPAN",{});var Na=n(ao);g(rt.$$.fragment,Na),Na.forEach(o),Ia.forEach(o),xs=d(Nr),io=s(Nr,"SPAN",{});var Ha=n(io);Es=i(Ha,"PyTorch Helper Functions"),Ha.forEach(o),Nr.forEach(o),pr=d(e),N=s(e,"DIV",{class:!0});var Ve=n(N);g(st.$$.fragment,Ve),qs=d(Ve),H=s(Ve,"P",{});var le=n(H);Ds=i(le,"This function chunks the "),lo=s(le,"CODE",{});var Ma=n(lo);Ls=i(Ma,"input_tensors"),Ma.forEach(o),Ps=i(le," into smaller input tensor parts of size "),co=s(le,"CODE",{});var Va=n(co);Cs=i(Va,"chunk_size"),Va.forEach(o),zs=i(le,` over the dimension `),po=s(le,"CODE",{});var ja=n(po);Ss=i(ja,"chunk_dim"),ja.forEach(o),Fs=i(le,". 
It then applies a layer "),mo=s(le,"CODE",{});var Qa=n(mo);As=i(Qa,"forward_fn"),Qa.forEach(o),Os=i(le," to each chunk independently to save memory."),le.forEach(o),Is=d(Ve),M=s(Ve,"P",{});var de=n(M);Ns=i(de,"If the "),fo=s(de,"CODE",{});var Ba=n(fo);Hs=i(Ba,"forward_fn"),Ba.forEach(o),Ms=i(de," is independent across the "),uo=s(de,"CODE",{});var Ga=n(uo);Vs=i(Ga,"chunk_dim"),Ga.forEach(o),js=i(de,` this function will yield the same result as directly applying `),ho=s(de,"CODE",{});var Ua=n(ho);Qs=i(Ua,"forward_fn"),Ua.forEach(o),Bs=i(de," to "),go=s(de,"CODE",{});var Ka=n(go);Gs=i(Ka,"input_tensors"),Ka.forEach(o),Us=i(de,"."),de.forEach(o),Ks=d(Ve),g(Fe.$$.fragment,Ve),Ve.forEach(o),mr=d(e),ue=s(e,"DIV",{class:!0});var Hr=n(ue);g(nt.$$.fragment,Hr),Rs=d(Hr),at=s(Hr,"P",{});var Mr=n(at);Ws=i(Mr,"Finds the heads and their indices taking "),_o=s(Mr,"CODE",{});var Ra=n(_o);Xs=i(Ra,"already_pruned_heads"),Ra.forEach(o),Js=i(Mr," into account."),Mr.forEach(o),Hr.forEach(o),fr=d(e),Z=s(e,"DIV",{class:!0});var Ht=n(Z);g(it.$$.fragment,Ht),Ys=d(Ht),vo=s(Ht,"P",{});var Wa=n(vo);Zs=i(Wa,"Prune a Conv1D or linear layer to keep only entries in index."),Wa.forEach(o),en=d(Ht),bo=s(Ht,"P",{});var Xa=n(bo);tn=i(Xa,"Used to remove heads."),Xa.forEach(o),Ht.forEach(o),ur=d(e),ee=s(e,"DIV",{class:!0});var Mt=n(ee);g(lt.$$.fragment,Mt),on=d(Mt),yo=s(Mt,"P",{});var Ja=n(yo);rn=i(Ja,`Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.`),Ja.forEach(o),sn=d(Mt),$o=s(Mt,"P",{});var Ya=n($o);nn=i(Ya,"Used to remove heads."),Ya.forEach(o),Mt.forEach(o),hr=d(e),te=s(e,"DIV",{class:!0});var Vt=n(te);g(dt.$$.fragment,Vt),an=d(Vt),wo=s(Vt,"P",{});var Za=n(wo);ln=i(Za,"Prune a linear layer to keep only entries in index."),Za.forEach(o),dn=d(Vt),To=s(Vt,"P",{});var ei=n(To);cn=i(ei,"Used to remove heads."),ei.forEach(o),Vt.forEach(o),gr=d(e),he=s(e,"H2",{class:!0});var Vr=n(he);Ae=s(Vr,"A",{id:!0,class:!0,href:!0});var ti=n(Ae);ko=s(ti,"SPAN",{});var oi=n(ko);g(ct.$$.fragment,oi),oi.forEach(o),ti.forEach(o),pn=d(Vr),xo=s(Vr,"SPAN",{});var ri=n(xo);mn=i(ri,"TensorFlow custom layers"),ri.forEach(o),Vr.forEach(o),_r=d(e),oe=s(e,"DIV",{class:!0});var jt=n(oe);g(pt.$$.fragment,jt),fn=d(jt),Eo=s(jt,"P",{});var si=n(Eo);un=i(si,"1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2)."),si.forEach(o),hn=d(jt),qo=s(jt,"P",{});var ni=n(qo);gn=i(ni,"Basically works like a linear layer but the weights are transposed."),ni.forEach(o),jt.forEach(o),vr=d(e),V=s(e,"DIV",{class:!0});var je=n(V);g(mt.$$.fragment,je),_n=d(je),Do=s(je,"P",{});var ai=n(Do);vn=i(ai,"Construct shared token embeddings."),ai.forEach(o),bn=d(je),Lo=s(je,"P",{});var ii=n(Lo);yn=i(ii,`The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.`),ii.forEach(o),$n=d(je),ie=s(je,"DIV",{class:!0});var Qt=n(ie);g(ft.$$.fragment,Qt),wn=d(Qt),Po=s(Qt,"P",{});var li=n(Po);Tn=i(li,"Get token embeddings of inputs or decode final hidden state."),li.forEach(o),kn=d(Qt),ut=s(Qt,"P",{});var jr=n(ut);xn=i(jr,`Shared weights logic is adapted from `),ht=s(jr,"A",{href:!0,rel:!0});var di=n(ht);En=i(di,"here"),di.forEach(o),qn=i(jr,"."),jr.forEach(o),Qt.forEach(o),je.forEach(o),br=d(e),ge=s(e,"DIV",{class:!0});var Qr=n(ge);g(gt.$$.fragment,Qr),Dn=d(Qr),Co=s(Qr,"P",{});var ci=n(Co);Ln=i(ci,"Compute a single vector summary of a sequence hidden states."),ci.forEach(o),Qr.forEach(o),yr=d(e),_e=s(e,"H2",{class:!0});var Br=n(_e);Oe=s(Br,"A",{id:!0,class:!0,href:!0});var pi=n(Oe);zo=s(pi,"SPAN",{});var mi=n(zo);g(_t.$$.fragment,mi),mi.forEach(o),pi.forEach(o),Pn=d(Br),So=s(Br,"SPAN",{});var fi=n(So);Cn=i(fi,"TensorFlow loss functions"),fi.forEach(o),Br.forEach(o),$r=d(e),re=s(e,"DIV",{class:!0});var Bt=n(re);g(vt.$$.fragment,Bt),zn=d(Bt),Fo=s(Bt,"P",{});var ui=n(Fo);Sn=i(ui,"Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token."),ui.forEach(o),Fn=d(Bt),g(Ie.$$.fragment,Bt),Bt.forEach(o),wr=d(e),se=s(e,"DIV",{class:!0});var Gt=n(se);g(bt.$$.fragment,Gt),An=d(Gt),Ao=s(Gt,"P",{});var hi=n(Ao);On=i(hi,"Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens."),hi.forEach(o),In=d(Gt),g(Ne.$$.fragment,Gt),Gt.forEach(o),Tr=d(e),ve=s(e,"DIV",{class:!0});var Gr=n(ve);g(yt.$$.fragment,Gr),Nn=d(Gr),Oo=s(Gr,"P",{});var gi=n(Oo);Hn=i(gi,"Loss function suitable for multiple choice tasks."),gi.forEach(o),Gr.forEach(o),kr=d(e),be=s(e,"DIV",{class:!0});var Ur=n(be);g($t.$$.fragment,Ur),Mn=d(Ur),Io=s(Ur,"P",{});var _i=n(Io);Vn=i(_i,"Loss function suitable for question answering."),_i.forEach(o),Ur.forEach(o),xr=d(e),ye=s(e,"DIV",{class:!0});var Kr=n(ye);g(wt.$$.fragment,Kr),jn=d(Kr),No=s(Kr,"P",{});var vi=n(No);Qn=i(vi,"Loss function suitable for sequence classification."),vi.forEach(o),Kr.forEach(o),Er=d(e),ne=s(e,"DIV",{class:!0});var Ut=n(ne);g(Tt.$$.fragment,Ut),Bn=d(Ut),Ho=s(Ut,"P",{});var bi=n(Ho);Gn=i(bi,"Loss function suitable for token classification."),bi.forEach(o),Un=d(Ut),g(He.$$.fragment,Ut),Ut.forEach(o),qr=d(e),$e=s(e,"H2",{class:!0});var Rr=n($e);Me=s(Rr,"A",{id:!0,class:!0,href:!0});var yi=n(Me);Mo=s(yi,"SPAN",{});var $i=n(Mo);g(kt.$$.fragment,$i),$i.forEach(o),yi.forEach(o),Kn=d(Rr),Vo=s(Rr,"SPAN",{});var wi=n(Vo);Rn=i(wi,"TensorFlow Helper Functions"),wi.forEach(o),Rr.forEach(o),Dr=d(e),we=s(e,"DIV",{class:!0});var Wr=n(we);g(xt.$$.fragment,Wr),Wn=d(Wr),Et=s(Wr,"P",{});var Xr=n(Et);Xn=i(Xr,"Creates a "),jo=s(Xr,"CODE",{});var Ti=n(jo);Jn=i(Ti,"tf.initializers.TruncatedNormal"),Ti.forEach(o),Yn=i(Xr," with the given range."),Xr.forEach(o),Wr.forEach(o),Lr=d(e),j=s(e,"DIV",{class:!0});var Qe=n(j);g(qt.$$.fragment,Qe),Zn=d(Qe),Qo=s(Qe,"P",{});var ki=n(Qo);ea=i(ki,"Decorate a Keras Layer class to support Keras 
serialization."),ki.forEach(o),ta=d(Qe),Bo=s(Qe,"P",{});var xi=n(Bo);oa=i(xi,"This is done by:"),xi.forEach(o),ra=d(Qe),Te=s(Qe,"OL",{});var Kt=n(Te);ke=s(Kt,"LI",{});var Rt=n(ke);sa=i(Rt,"Adding a "),Go=s(Rt,"CODE",{});var Ei=n(Go);na=i(Ei,"transformers_config"),Ei.forEach(o),aa=i(Rt," dict to the Keras config dictionary in "),Uo=s(Rt,"CODE",{});var qi=n(Uo);ia=i(qi,"get_config"),qi.forEach(o),la=i(Rt,` (called by Keras at serialization time.`),Rt.forEach(o),da=d(Kt),xe=s(Kt,"LI",{});var Wt=n(xe);ca=i(Wt,"Wrapping "),Ko=s(Wt,"CODE",{});var Di=n(Ko);pa=i(Di,"__init__"),Di.forEach(o),ma=i(Wt," to accept that "),Ro=s(Wt,"CODE",{});var Li=n(Ro);fa=i(Li,"transformers_config"),Li.forEach(o),ua=i(Wt,` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.`),Wt.forEach(o),ha=d(Kt),Ee=s(Kt,"LI",{});var Xt=n(Ee);ga=i(Xt,`Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `),Wo=s(Xt,"CODE",{});var Pi=n(Wo);_a=i(Pi,"custom_objects"),Pi.forEach(o),va=i(Xt," in the call to "),Xo=s(Xt,"CODE",{});var Ci=n(Xo);ba=i(Ci,"tf.keras.models.load_model"),Ci.forEach(o),ya=i(Xt,"."),Xt.forEach(o),Kt.forEach(o),Qe.forEach(o),Pr=d(e),qe=s(e,"DIV",{class:!0});var Jr=n(qe);g(Dt.$$.fragment,Jr),$a=d(Jr),Jo=s(Jr,"P",{});var zi=n(Jo);wa=i(zi,"Deal with dynamic shape in tensorflow cleanly."),zi.forEach(o),Jr.forEach(o),this.h()},h(){f(p,"name","hf:doc:metadata"),f(p,"content",JSON.stringify(Ri)),f($,"id","custom-layers-and-utilities"),f($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f($,"href","#custom-layers-and-utilities"),f(u,"class","relative group"),f(E,"id","transformers.Conv1D"),f(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(E,"href","#transformers.Conv1D"),f(L,"class","relative group"),f(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Pt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(De,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ct,"href","/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SQuADHead"),f(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(zt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Se,"id","transformers.apply_chunking_to_forward"),f(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Se,"href","#transformers.apply_chunking_to_forward"),f(fe,"class","relative group"),f(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Ae,"id","transformers.modeling_tf_utils.TFConv1D"),f(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ae,"href","#transformers.modeling_tf_utils.TFConv1D"),f(he,"class","relative group"),f(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ht,"href","https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24"),f(ht,"rel","nofollow"),f(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Oe,"id","transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"),f(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Oe,"href","#transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"),f(_e,"class","relative group"),f(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(Me,"id","transformers.modeling_tf_utils.get_initializer"),f(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Me,"href","#transformers.modeling_tf_utils.get_initializer"),f($e,"class","relative group"),f(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),f(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,c){t(document.head,p),m(e,x,c),m(e,u,c),t(u,$),t($,q),_(w,q,null),t(u,D),t(u,O),t(O,P),m(e,Q,c),m(e,F,c),t(F,C),m(e,B,c),m(e,A,c),t(A,z),m(e,G,c),m(e,L,c),t(L,E),t(E,T),_(I,T,null),t(L,ce),t(L,U),t(U,pe),m(e,ae,c),m(e,K,c),_(Be,K,null),t(K,Yr),t(K,Yt),t(Yt,Zr),t(K,es),t(K,Zt),t(Zt,ts),m(e,sr,c),m(e,R,c),_(Ge,R,null),t(R,os),t(R,eo),t(eo,rs),t(R,ss),t(R,Pt),_(Ue,Pt,null),m(e,nr,c),m(e,W,c),_(Ke,W,null),t(W,ns),t(W,to),t(to,as),t(W,is),t(W,De),_(Re,De,null),t(De,ls),_(Le,De,null),m(e,ar,c),m(e,X,c),_(We,X,null),t(X,ds),t(X,oo),t(oo,cs),t(X,ps),t(X,Pe),_(Xe,Pe,null),t(Pe,ms),_(Ce,Pe,null),m(e,ir,c),m(e,me,c),_(Je,me,null),t(me,fs),t(me,Ye),t(Ye,us),t(Ye,Ct),t(Ct,hs),t(Ye,gs),m(e,lr,c),m(e,J,c),_(Ze,J,null),t(J,_s),t(J,ro),t(ro,vs),t(J,bs),t(J,zt),_(et,zt,null),m(e,dr,c),m(e,Y,c),_(tt,Y,null),t(Y,ys),t(Y,so),t(so,$s),t(Y,ws),t(Y,ze),_(ot,ze,null),t(ze,Ts),t(ze,no),t(no,ks),m(e,cr,c),m(e,fe,c),t(fe,Se),t(Se,ao),_(rt,ao,null),t(fe,xs),t(fe,io),t(io,Es),m(e,pr,c),m(e,N,c),_(st,N,null),t(N,qs),t(N,H),t(H,Ds),t(H,lo),t(lo,Ls),t(H,Ps),t(H,co),t(co,Cs),t(H,zs),t(H,po),t(po,Ss),t(H,Fs),t(H,mo),t(mo,As),t(H,Os),t(N,Is),t(N,M),t(M,Ns),t(M,fo),t(fo,Hs),t(M,Ms),t(M,uo),t(uo,Vs),t(M,js),t(M,ho),t(ho,Qs),t(M,Bs),t(M,go),t(go,Gs),t(M,Us),t(N,Ks),_(Fe,N,null),m(e,mr,c),m(e,ue,c),_(nt,ue,null),t(ue,Rs),t(ue,at),t(at,Ws),t(at,_o),t(_o,Xs),t(at,Js),m(e,fr,c),m(e,Z,c),_(it,Z,null),t(Z,Ys),t(Z,vo),t(vo,Zs),t(Z,en),t(Z,bo),t(bo,tn),m(e,ur,c),m(e,ee,c),_(lt,ee,null),t(ee,on),t(ee,yo),t(yo,rn),t(ee,sn),t(ee,$o),t($o,nn),m(e,hr,c),m(e,te,c),_(dt,te,null),t(te,an),t(te,wo),t(wo,ln),t(te,dn),t(te,To),t(To,cn),m(e,gr,c),m(e,he,c),t(he,Ae),t(Ae,ko),_(ct,ko,null),t(he,pn),t(he,xo),t(xo,mn),m(e,_r,c),m(e,oe,c),_(pt,oe,null),t(oe,fn),t(oe,Eo),t(Eo,un),t(oe,hn),t(oe,qo),t(qo,gn),m(e,vr,c),m(e,V,c),_(mt,V,null),t(V,_n),t(V,Do),t(Do,vn),t(V,bn),t(V,Lo),t(Lo,yn),t(V,$n),t(V,ie),_(ft,ie,null),t(ie,wn),t(ie,Po),t(Po,Tn),t(ie,kn),t(ie,ut),t(ut,xn),t(ut,ht),t(ht,En),t(ut,qn),m(e,br,c),m(e,ge,c),_(gt,ge,null),t(ge,Dn),t(ge,Co),t(Co,Ln),m(e,yr,c),m(e,_e,c),t(_e,Oe),t(Oe,zo),_(_t,zo,null),t(_e,Pn),t(_e,So),t(So,Cn),m(e,$r,c),m(e,re,c),_(vt,re,null),t(re,zn),t(re,Fo),t(Fo,Sn),t(re,Fn),_(Ie,re,null),m(e,wr,c),m(e,se,c),_(bt,se,null),t(se,An),t(se,Ao),t(Ao,On),t(se,In),_(Ne,se,null),m(e,Tr,c),m(e,ve,c),_(yt,ve,null),t(ve,Nn),t(ve,Oo),t(Oo,Hn),m(e,kr,c),m(e,be,c),_($t,be,null),t(be,Mn),t(be,Io),t(Io,Vn),m(e,xr,c),m(e,ye,c),_(wt,ye,null),t(ye,jn),t(ye,No),t(No,Qn),m(e,Er,c),m(e,ne,c),_(Tt,ne,null),t(ne,Bn),t(ne,Ho),t(Ho,Gn),t(ne,Un),_(He,ne,null),m(e,qr,c),m(e,$e,c),t($e,Me),t(Me,Mo),_(kt,Mo,null),t($e,Kn),t($e,Vo),t(Vo,Rn),m(e,Dr,c),m(e,we,c),_(xt,we,null),t(we,Wn),t(we,Et),t(Et,Xn),t(Et,jo),t(jo,Jn),t(Et,Yn),m(e,Lr,c),m(e,j,c),_(qt,j,null),t(j,Zn),t(j,Qo),t(Qo,ea),t(j,ta),t(j,Bo),t(Bo,oa),t(j,ra),t(j,Te),t(Te,ke),t(ke,sa),t(ke,Go),t(Go,na),t(ke,aa),t(ke,Uo),t(Uo,ia),t(ke,la),t(Te,da),t(Te,xe),t(xe,ca),t(xe,Ko),t(Ko,pa),t(xe,ma),t(xe,Ro),t(Ro,fa),t(xe,ua),t(Te,ha),t(Te,Ee),t(Ee,ga),t(Ee,Wo),t(Wo,_a),t(Ee,va),t(Ee,Xo),t(Xo,ba),t(Ee,ya),m(e,Pr,c),m(e,qe,c),_(Dt,qe,null),t(qe,$a),t(qe,Jo),t(Jo,wa),Cr=!0},p(e,[c]){const Lt={};c&2&&(Lt.$$scope={dirty:c,ctx:e}),Le.$set(Lt);const Yo={};c&2&&(Yo.$$scope={dirty:c,ctx:e}),Ce.$set(Yo);const Zo={};c&2&&(Zo.$$scope={dirty:c,ctx:e}),Fe.$set(Zo);const er={};c&2&&(er.$$scope={dirty:c,ctx:e}),Ie.$set(er);const tr={};c&2&&(tr.$$scope={dirty:c,ctx:e}),Ne.$set(tr);const 
or={};c&2&&(or.$$scope={dirty:c,ctx:e}),He.$set(or)},i(e){Cr||(v(w.$$.fragment,e),v(I.$$.fragment,e),v(Be.$$.fragment,e),v(Ge.$$.fragment,e),v(Ue.$$.fragment,e),v(Ke.$$.fragment,e),v(Re.$$.fragment,e),v(Le.$$.fragment,e),v(We.$$.fragment,e),v(Xe.$$.fragment,e),v(Ce.$$.fragment,e),v(Je.$$.fragment,e),v(Ze.$$.fragment,e),v(et.$$.fragment,e),v(tt.$$.fragment,e),v(ot.$$.fragment,e),v(rt.$$.fragment,e),v(st.$$.fragment,e),v(Fe.$$.fragment,e),v(nt.$$.fragment,e),v(it.$$.fragment,e),v(lt.$$.fragment,e),v(dt.$$.fragment,e),v(ct.$$.fragment,e),v(pt.$$.fragment,e),v(mt.$$.fragment,e),v(ft.$$.fragment,e),v(gt.$$.fragment,e),v(_t.$$.fragment,e),v(vt.$$.fragment,e),v(Ie.$$.fragment,e),v(bt.$$.fragment,e),v(Ne.$$.fragment,e),v(yt.$$.fragment,e),v($t.$$.fragment,e),v(wt.$$.fragment,e),v(Tt.$$.fragment,e),v(He.$$.fragment,e),v(kt.$$.fragment,e),v(xt.$$.fragment,e),v(qt.$$.fragment,e),v(Dt.$$.fragment,e),Cr=!0)},o(e){b(w.$$.fragment,e),b(I.$$.fragment,e),b(Be.$$.fragment,e),b(Ge.$$.fragment,e),b(Ue.$$.fragment,e),b(Ke.$$.fragment,e),b(Re.$$.fragment,e),b(Le.$$.fragment,e),b(We.$$.fragment,e),b(Xe.$$.fragment,e),b(Ce.$$.fragment,e),b(Je.$$.fragment,e),b(Ze.$$.fragment,e),b(et.$$.fragment,e),b(tt.$$.fragment,e),b(ot.$$.fragment,e),b(rt.$$.fragment,e),b(st.$$.fragment,e),b(Fe.$$.fragment,e),b(nt.$$.fragment,e),b(it.$$.fragment,e),b(lt.$$.fragment,e),b(dt.$$.fragment,e),b(ct.$$.fragment,e),b(pt.$$.fragment,e),b(mt.$$.fragment,e),b(ft.$$.fragment,e),b(gt.$$.fragment,e),b(_t.$$.fragment,e),b(vt.$$.fragment,e),b(Ie.$$.fragment,e),b(bt.$$.fragment,e),b(Ne.$$.fragment,e),b(yt.$$.fragment,e),b($t.$$.fragment,e),b(wt.$$.fragment,e),b(Tt.$$.fragment,e),b(He.$$.fragment,e),b(kt.$$.fragment,e),b(xt.$$.fragment,e),b(qt.$$.fragment,e),b(Dt.$$.fragment,e),Cr=!1},d(e){o(p),e&&o(x),e&&o(u),y(w),e&&o(Q),e&&o(F),e&&o(B),e&&o(A),e&&o(G),e&&o(L),y(I),e&&o(ae),e&&o(K),y(Be),e&&o(sr),e&&o(R),y(Ge),y(Ue),e&&o(nr),e&&o(W),y(Ke),y(Re),y(Le),e&&o(ar),e&&o(X),y(We),y(Xe),y(Ce),e&&o(ir),e&&o(me),y(Je),e&&o(lr),e&&o(J),y(Ze),y(et),e&&o(dr),e&&o(Y),y(tt),y(ot),e&&o(cr),e&&o(fe),y(rt),e&&o(pr),e&&o(N),y(st),y(Fe),e&&o(mr),e&&o(ue),y(nt),e&&o(fr),e&&o(Z),y(it),e&&o(ur),e&&o(ee),y(lt),e&&o(hr),e&&o(te),y(dt),e&&o(gr),e&&o(he),y(ct),e&&o(_r),e&&o(oe),y(pt),e&&o(vr),e&&o(V),y(mt),y(ft),e&&o(br),e&&o(ge),y(gt),e&&o(yr),e&&o(_e),y(_t),e&&o($r),e&&o(re),y(vt),y(Ie),e&&o(wr),e&&o(se),y(bt),y(Ne),e&&o(Tr),e&&o(ve),y(yt),e&&o(kr),e&&o(be),y($t),e&&o(xr),e&&o(ye),y(wt),e&&o(Er),e&&o(ne),y(Tt),y(He),e&&o(qr),e&&o($e),y(kt),e&&o(Dr),e&&o(we),y(xt),e&&o(Lr),e&&o(j),y(qt),e&&o(Pr),e&&o(qe),y(Dt)}}}const Ri={local:"custom-layers-and-utilities",sections:[{local:"transformers.Conv1D",title:"Pytorch custom modules"},{local:"transformers.apply_chunking_to_forward",title:"PyTorch Helper Functions"},{local:"transformers.modeling_tf_utils.TFConv1D",title:"TensorFlow custom layers"},{local:"transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",title:"TensorFlow loss functions"},{local:"transformers.modeling_tf_utils.get_initializer",title:"TensorFlow Helper Functions"}],title:"Custom Layers and Utilities"};function Wi(S){return Ii(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ol extends Si{constructor(p){super();Fi(this,p,Wi,Ki,Ai,{})}}export{ol as default,Ri as metadata};
41
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_19429/en/_app/pages/internal/tokenization_utils.mdx-hf-doc-builder.js
import{S as Ym,i as Jm,s as Km,e as r,k as d,w as k,t as o,M as Qm,c as s,d as t,m as l,a,x as b,h as n,b as m,G as e,g as P,y as v,q as T,o as y,B as z,v as Zm,L as ys}from"../../chunks/vendor-hf-doc-builder.js";import{T as Lr}from"../../chunks/Tip-hf-doc-builder.js";import{D as E}from"../../chunks/Docstring-hf-doc-builder.js";import{C as zs}from"../../chunks/CodeBlock-hf-doc-builder.js";import{I as vs}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{E as Ts}from"../../chunks/ExampleCodeBlock-hf-doc-builder.js";function eh(q){let p,$,f,h,x;return{c(){p=r("p"),$=o("This method is deprecated, "),f=r("code"),h=o("__call__"),x=o(" should be used instead.")},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"This method is deprecated, "),f=s(_,"CODE",{});var L=a(f);h=n(L,"__call__"),L.forEach(t),x=n(_," should be used instead."),_.forEach(t)},m(c,_){P(c,p,_),e(p,$),e(p,f),e(f,h),e(p,x)},d(c){c&&t(p)}}}function th(q){let p,$,f,h,x;return{c(){p=r("p"),$=o("This method is deprecated, "),f=r("code"),h=o("__call__"),x=o(" should be used instead.")},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"This method is deprecated, "),f=s(_,"CODE",{});var L=a(f);h=n(L,"__call__"),L.forEach(t),x=n(_," should be used instead."),_.forEach(t)},m(c,_){P(c,p,_),e(p,$),e(p,f),e(f,h),e(p,x)},d(c){c&&t(p)}}}function oh(q){let p,$,f,h,x;return{c(){p=r("p"),$=o("Passing "),f=r("code"),h=o("use_auth_token=True"),x=o(" is required when you want to use a private model.")},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"Passing "),f=s(_,"CODE",{});var L=a(f);h=n(L,"use_auth_token=True"),L.forEach(t),x=n(_," is required when you want to use a private model."),_.forEach(t)},m(c,_){P(c,p,_),e(p,$),e(p,f),e(f,h),e(p,x)},d(c){c&&t(p)}}}function nh(q){let p,$,f,h,x;return h=new zs({props:{code:`# We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>"`,highlighted:`<span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PreTrainedTokenizerBase* so let&#x27;s show our examples on a derived class: BertTokenizer</span> <span class="hljs-comment"># Download vocabulary from huggingface.co and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Download vocabulary from huggingface.co (user-uploaded) and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-base-german-cased&quot;</span>) <span class="hljs-comment"># If vocabulary files are in a directory (e.g. 
tokenizer was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-comment"># If the tokenizer uses a single vocabulary file, you can point directly to this file</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_vocab.txt&quot;</span>) <span class="hljs-comment"># You can link tokens to special vocabulary when instantiating</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, unk_token=<span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>) <span class="hljs-comment"># You should be sure &#x27;&lt;unk&gt;&#x27; is in the vocabulary when doing that.</span> <span class="hljs-comment"># Otherwise use tokenizer.add_special_tokens({&#x27;unk_token&#x27;: &#x27;&lt;unk&gt;&#x27;}) instead)</span> <span class="hljs-keyword">assert</span> tokenizer.unk_token == <span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>`}}),{c(){p=r("p"),$=o("Examples:"),f=d(),k(h.$$.fragment)},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"Examples:"),_.forEach(t),f=l(c),b(h.$$.fragment,c)},m(c,_){P(c,p,_),e(p,$),P(c,f,_),v(h,c,_),x=!0},p:ys,i(c){x||(T(h.$$.fragment,c),x=!0)},o(c){y(h.$$.fragment,c),x=!1},d(c){c&&t(p),c&&t(f),z(h,c)}}}function rh(q){let p,$,f,h,x,c,_,L;return{c(){p=r("p"),$=o("If the "),f=r("code"),h=o("encoded_inputs"),x=o(` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),c=r("code"),_=o("return_tensors"),L=o(`. In the case of PyTorch tensors, you will lose the specific device of your tensors however.`)},l(ge){p=s(ge,"P",{});var j=a(p);$=n(j,"If the "),f=s(j,"CODE",{});var D=a(f);h=n(D,"encoded_inputs"),D.forEach(t),x=n(j,` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),c=s(j,"CODE",{});var ro=a(c);_=n(ro,"return_tensors"),ro.forEach(t),L=n(j,`. In the case of PyTorch tensors, you will lose the specific device of your tensors however.`),j.forEach(t)},m(ge,j){P(ge,p,j),e(p,$),e(p,f),e(f,h),e(p,x),e(p,c),e(c,_),e(p,L)},d(ge){ge&&t(p)}}}function sh(q){let p,$,f,h,x;return h=new zs({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert". tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to an organization with the name "my-finetuned-bert". 
tokenizer.push_to_hub("huggingface/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)`}}),{c(){p=r("p"),$=o("Examples:"),f=d(),k(h.$$.fragment)},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"Examples:"),_.forEach(t),f=l(c),b(h.$$.fragment,c)},m(c,_){P(c,p,_),e(p,$),P(c,f,_),v(h,c,_),x=!0},p:ys,i(c){x||(T(h.$$.fragment,c),x=!0)},o(c){y(h.$$.fragment,c),x=!1},d(c){c&&t(p),c&&t(f),z(h,c)}}}function ah(q){let p,$;return{c(){p=r("p"),$=o("This API is experimental and may have some slight breaking changes in the next releases.")},l(f){p=s(f,"P",{});var h=a(p);$=n(h,"This API is experimental and may have some slight breaking changes in the next releases."),h.forEach(t)},m(f,h){P(f,p,h),e(p,$)},d(f){f&&t(p)}}}function ih(q){let p,$,f,h,x;return h=new zs({props:{code:`# Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2Model.from_pretrained("gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>"`,highlighted:`<span class="hljs-comment"># Let&#x27;s see how to add a new classification token to GPT-2</span> tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2Model.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) special_tokens_dict = {<span class="hljs-string">&quot;cls_token&quot;</span>: <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-keyword">assert</span> tokenizer.cls_token == <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>`}}),{c(){p=r("p"),$=o("Examples:"),f=d(),k(h.$$.fragment)},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"Examples:"),_.forEach(t),f=l(c),b(h.$$.fragment,c)},m(c,_){P(c,p,_),e(p,$),P(c,f,_),v(h,c,_),x=!0},p:ys,i(c){x||(T(h.$$.fragment,c),x=!0)},o(c){y(h.$$.fragment,c),x=!1},d(c){c&&t(p),c&&t(f),z(h,c)}}}function dh(q){let p,$,f,h,x;return h=new zs({props:{code:`# Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") model = BertModel.from_pretrained("bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer))`,highlighted:`<span class="hljs-comment"># Let&#x27;s see how to increase the vocabulary of Bert model and tokenizer</span> tokenizer = BertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) num_added_toks = tokenizer.add_tokens([<span class="hljs-string">&quot;new_tok1&quot;</span>, <span class="hljs-string">&quot;my_new-tok2&quot;</span>]) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer))`}}),{c(){p=r("p"),$=o("Examples:"),f=d(),k(h.$$.fragment)},l(c){p=s(c,"P",{});var _=a(p);$=n(_,"Examples:"),_.forEach(t),f=l(c),b(h.$$.fragment,c)},m(c,_){P(c,p,_),e(p,$),P(c,f,_),v(h,c,_),x=!0},p:ys,i(c){x||(T(h.$$.fragment,c),x=!0)},o(c){y(h.$$.fragment,c),x=!1},d(c){c&&t(p),c&&t(f),z(h,c)}}}function lh(q){let p,$,f,h,x,c,_,L,ge,j,D,ro,so,ws,xs,ao,$s,Ps,io,Es,qs,lo,Bs,Ls,Dr,co,Ds,Ir,ke,Ie,Oo,at,Is,Wo,Ns,Nr,u,it,As,be,Fs,po,Ss,Cs,mo,Os,Ws,js,jo,Rs,Us,Ro,Ms,Vs,I,Z,Uo,Gs,Hs,Mo,Xs,Ys,Vo,Js,Ks,Qs,F,Go,Zs,ea,Ho,ta,oa,Xo,na,ra,Yo,sa,aa,Jo,ia,da,la,V,Ko,ca,pa,Qo,ma,ha,Zo,ua,fa,en,_a,ga,ka,S,tn,ba,va,on,Ta,ya,nn,za,wa,rn,xa,$a,ho,Pa,Ea,qa,Ne,sn,Ba,La,an,Da,Ia,Na,G,dn,Aa,Fa,ln,Sa,Ca,cn,Oa,Wa,pn,ja,Ra,Ua,H,mn,Ma,Va,hn,Ga,Ha,un,Xa,Ya,fn,Ja,Ka,Qa,Ae,dt,Za,_n,ei,ti,Fe,lt,oi,gn,ni,ri,Se,ct,si,kn,ai,ii,ee,pt,di,bn,li,ci,Ce,pi,te,mt,mi,vn,hi,ui,Tn,fi,_i,Oe,ht,gi,yn,ki,bi,We,ut,vi,ft,Ti,zn,yi,zi,wi,oe,_t,xi,uo,$i,fo,Pi,Ei,wn,qi,Bi,ne,gt,Li,xn,Di,Ii,kt,Ni,$n,Ai,Fi,Si,re,bt,Ci,Pn,Oi,Wi,vt,ji,En,Ri,Ui,Mi,se,Tt,Vi,qn,Gi,Hi,je,Xi,X,yt,Yi,zt,Ji,_o,Ki,Qi,Zi,Re,ed,Ue,td,Me,wt,od,ve,nd,Bn,rd,sd,Ln,ad,id,dd,ae,xt,ld,Dn,cd,pd,ie,In,md,hd,Nn,ud,fd,An,_d,gd,kd,C,$t,bd,Fn,vd,Td,J,yd,Sn,zd,wd,Cn,xd,$d,On,Pd,Ed,qd,Te,Bd,Wn,Ld,Dd,jn,Id,Nd,Ad,Ve,Fd,Ge,Pt,Sd,R,Cd,Rn,Od,Wd,Un,jd,Rd,Mn,Ud,Md,Vn,Vd,Gd,Hd,He,Et,Xd,Gn,Yd,Jd,de,qt,Kd,Bt,Qd,Hn,Zd,el,tl,Xe,ol,le,Lt,nl,Dt,rl,Xn,sl,al,il,Ye,dl,Y,It,ll,Yn,cl,pl,Nt,ml,Jn,hl,ul,fl,At,_l,Kn,gl,kl,bl,ce,Ft,vl,Qn,Tl,yl,St,zl,Zn,wl,xl,$l,Je,Ct,Pl,Ot,El,er,ql,Bl,Ll,Ke,Wt,Dl,tr,Il,Ar,ye,Qe,or,jt,Nl,nr,Al,Fr,A,Rt,Fl,ze,Sl,go,Cl,Ol,ko,Wl,jl,Rl,B,Ut,Ul,rr,Ml,Vl,sr,Gl,Hl,Mt,Xl,bo,Yl,Jl,Kl,Vt,Ql,ar,Zl,ec,tc,Gt,ir,oc,nc,Ht,rc,dr,sc,ac,ic,U,dc,vo,lc,cc,lr,pc,mc,cr,hc,uc,pr,fc,_c,gc,Ze,kc,O,Xt,bc,mr,vc,Tc,hr,yc,zc,Yt,wc,To,xc,$c,Pc,et,Ec,pe,Jt,qc,we,Bc,ur,Lc,Dc,fr,Ic,Nc,Ac,_r,Fc,Sr,xe,tt,gr,Kt,Sc,kr,Cc,Cr,$e,Qt,Oc,Pe,Wc,br,jc,Rc,ot,Uc,vr,Mc,Vc,Gc,Or,Ee,Zt,Hc,Tr,Xc,Wr,qe,eo,Yc,yr,Jc,jr;return c=new vs({}),at=new vs({}),it=new E({props:{name:"class transformers.PreTrainedTokenizerBase",anchor:"transformers.PreTrainedTokenizerBase",parameters:[{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. 
When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizerBase.padding_side",description:`<strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"padding_side"},{anchor:"transformers.PreTrainedTokenizerBase.truncation_side",description:`<strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"truncation_side"},{anchor:"transformers.PreTrainedTokenizerBase.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizerBase.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizerBase.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizerBase.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizerBase.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizerBase.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizerBase.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizerBase.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizerBase.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1453"}}),dt=new E({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"text_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None"},{name:"text_pair_target",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_target",description:`<strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_target"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair_target",description:`<strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair_target"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when 
<code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),lt=new E({props:{name:"as_target_tokenizer",anchor:"transformers.PreTrainedTokenizerBase.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3536"}}),ct=new E({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370",returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),pt=new E({props:{name:"batch_encode_plus",anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus",parameters:[{name:"batch_text_or_text_pairs",val:": typing.Union[typing.List[str], typing.List[typing.Tuple[str, str]], typing.List[typing.List[str]], typing.List[typing.Tuple[typing.List[str], typing.List[str]]], typing.List[typing.List[int]], typing.List[typing.Tuple[typing.List[int], typing.List[int]]]]"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = 
False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs",description:`<strong>batch_text_or_text_pairs</strong> (<code>List[str]</code>, <code>List[Tuple[str, str]]</code>, <code>List[List[str]]</code>, <code>List[Tuple[List[str], List[str]]]</code>, and for not-fast tokenizers, also <code>List[List[int]]</code>, <code>List[Tuple[List[int], List[int]]]</code>) &#x2014; Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in <code>encode_plus</code>).`,name:"batch_text_or_text_pairs"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2707",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),Ce=new Lr({props:{warning:!0,$$slots:{default:[eh]},$$scope:{ctx:q}}}),mt=new E({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3003",returnDescription:` <p>The model input with special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ht=new E({props:{name:"clean_up_tokenization",anchor:"transformers.PreTrainedTokenizerBase.clean_up_tokenization",parameters:[{name:"out_string",val:": str"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string",description:"<strong>out_string</strong> (<code>str</code>) &#x2014; The text to clean 
up.",name:"out_string"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3479",returnDescription:` <p>The cleaned-up string.</p> `,returnType:` <p><code>str</code></p> `}}),ut=new E({props:{name:"convert_tokens_to_string",anchor:"transformers.PreTrainedTokenizerBase.convert_tokens_to_string",parameters:[{name:"tokens",val:": typing.List[str]"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens",description:"<strong>tokens</strong> (<code>List[str]</code>) &#x2014; The token to join in a string.",name:"tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3357",returnDescription:` <p>The joined tokens.</p> `,returnType:` <p><code>str</code></p> `}}),_t=new E({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2983",returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),gt=new E({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403",returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),bt=new E({props:{name:"encode",anchor:"transformers.PreTrainedTokenizerBase.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.`,name:"return_tensors"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220",returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),Tt=new E({props:{name:"encode_plus",anchor:"transformers.PreTrainedTokenizerBase.encode_plus",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code> (the latter only for not-fast tokenizers)) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2611",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),je=new Lr({props:{warning:!0,$$slots:{default:[th]},$$scope:{ctx:q}}}),yt=new E({props:{name:"from_pretrained",anchor:"transformers.PreTrainedTokenizerBase.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"*init_inputs",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a predefined tokenizer hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing vocabulary files required by the tokenizer, for instance saved using the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>(<strong>Deprecated</strong>, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., <code>./my_model_directory/vocab.txt</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.`,name:"cache_dir"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download the vocabulary files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only rely on local files and not to attempt to download any files.`,name:"local_files_only"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here.`,name:"subfolder"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.inputs",description:`<strong>inputs</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the Tokenizer <code>__init__</code> method.`,name:"inputs"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the Tokenizer <code>__init__</code> method. Can be used to set special tokens like <code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>. See parameters in the <code>__init__</code> for more details.`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1570"}}),Re=new Lr({props:{$$slots:{default:[oh]},$$scope:{ctx:q}}}),Ue=new Ts({props:{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.example",$$slots:{default:[nh]},$$scope:{ctx:q}}}),wt=new E({props:{name:"get_special_tokens_mask",anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids of the first sequence.`,name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of ids of the second sequence.`,name:"token_ids_1"},{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3448",returnDescription:` <p>1 for a special token, 0 for a sequence token.</p> `,returnType:` <p>A list of integers in the range [0, 1]</p> `}}),xt=new E({props:{name:"get_vocab",anchor:"transformers.PreTrainedTokenizerBase.get_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1558",returnDescription:` <p>The vocabulary.</p> `,returnType:` <p><code>Dict[str, int]</code></p> `}}),$t=new E({props:{name:"pad",anchor:"transformers.PreTrainedTokenizerBase.pad",parameters:[{name:"encoded_inputs",val:": 
typing.Union[transformers.tokenization_utils_base.BatchEncoding, typing.List[transformers.tokenization_utils_base.BatchEncoding], typing.Dict[str, typing.List[int]], typing.Dict[str, typing.List[typing.List[int]]], typing.List[typing.Dict[str, typing.List[int]]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"verbose",val:": bool = True"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.pad.encoded_inputs",description:`<strong>encoded_inputs</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, list of <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <code>Dict[str, List[int]]</code>, <code>Dict[str, List[List[int]]</code> or <code>List[Dict[str, List[int]]]</code>) &#x2014; Tokenized inputs. Can represent one input (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> or <code>Dict[str, List[int]]</code>) or a batch of tokenized inputs (list of <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <em>Dict[str, List[List[int]]]</em> or <em>List[Dict[str, List[int]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[int]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.`,name:"encoded_inputs"},{anchor:"transformers.PreTrainedTokenizerBase.pad.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.pad.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta).</p> 
</blockquote>`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.pad.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.pad.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.pad.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings.`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2810"}}),Ve=new Lr({props:{$$slots:{default:[rh]},$$scope:{ctx:q}}}),Pt=new E({props:{name:"prepare_for_model",anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model",parameters:[{name:"ids",val:": typing.List[int]"},{name:"pair_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"prepend_batch_axis",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.ids",description:`<strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids",description:`<strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. 
Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"pair_ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3023",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> 
\u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),Et=new E({props:{name:"prepare_seq2seq_batch",anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch",parameters:[{name:"src_texts",val:": typing.List[str]"},{name:"tgt_texts",val:": typing.Optional[typing.List[str]] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_target_length",val:": typing.Optional[int] = None"},{name:"padding",val:": str = 'longest'"},{name:"return_tensors",val:": str = None"},{name:"truncation",val:": bool = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts",description:`<strong>src_texts</strong> (<code>List[str]</code>) &#x2014; List of documents to summarize or source language texts.`,name:"src_texts"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts",description:`<strong>tgt_texts</strong> (<code>list</code>, <em>optional</em>) &#x2014; List of summaries or target language texts.`,name:"tgt_texts"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length",description:`<strong>max_target_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to <code>None</code>, this will use the max_length value.`,name:"max_target_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs &#x2014; Additional keyword arguments passed along to <code>self.__call__</code>.</li> </ul>`,name:"truncation"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3579",returnDescription:` <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li><strong>input_ids</strong> \u2014 List of token ids to be fed to the encoder.</li> <li><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model.</li> <li><strong>labels</strong> \u2014 List of token ids for tgt_texts.</li> </ul> <p>The full set of keys <code>[input_ids, attention_mask, labels]</code>, will only be returned if tgt_texts is passed. 
Otherwise, input_ids, attention_mask will be the only keys.</p> `,returnType:` <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),qt=new E({props:{name:"push_to_hub",anchor:"transformers.PreTrainedTokenizerBase.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"use_temp_dir",val:": typing.Optional[bool] = None"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = '10GB'"},{name:"create_pr",val:": bool = False"},{name:"**deprecated_kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.`,name:"repo_id"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.`,name:"use_temp_dir"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).`,name:"max_shard_size"},{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712"}}),Xe=new Ts({props:{anchor:"transformers.PreTrainedTokenizerBase.push_to_hub.example",$$slots:{default:[sh]},$$scope:{ctx:q}}}),Lt=new E({props:{name:"register_for_auto_class",anchor:"transformers.PreTrainedTokenizerBase.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoTokenizer'"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoTokenizer&quot;</code>) &#x2014; The auto class to register this new tokenizer with.`,name:"auto_class"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3553"}}),Ye=new Lr({props:{warning:!0,$$slots:{default:[ah]},$$scope:{ctx:q}}}),It=new E({props:{name:"save_pretrained",anchor:"transformers.PreTrainedTokenizerBase.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"legacy_format",val:": typing.Optional[bool] = None"},{name:"filename_prefix",val:": typing.Optional[str] = None"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.save_directory",description:"<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The path to a directory where the tokenizer will be saved.",name:"save_directory"},{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format",description:`<strong>legacy_format</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate added_tokens files.</p> <p>If <code>False</code>, will only save the tokenizer in the unified JSON format. This format is incompatible with &#x201C;slow&#x201D; tokenizers (not powered by the <em>tokenizers</em> library), so the tokenizer will not be able to be loaded in the corresponding &#x201C;slow&#x201D; tokenizer.</p> <p>If <code>True</code>, will save the tokenizer in legacy format. If the &#x201C;slow&#x201D; tokenizer doesn&#x2019;t exits, a value error is raised. filename_prefix &#x2014; (<code>str</code>, <em>optional</em>): A prefix to add to the names of the files saved by the tokenizer.`,name:"legacy_format"},{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). 
kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2020",returnDescription:` <p>The files saved.</p> `,returnType:` <p>A tuple of <code>str</code></p> `}}),Ft=new E({props:{name:"save_vocabulary",anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; The directory in which to save the vocabulary.`,name:"save_directory"},{anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix",description:`<strong>filename_prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; An optional prefix to add to the named of the saved files.`,name:"filename_prefix"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2182",returnDescription:` <p>Paths to the files saved.</p> `,returnType:` <p><code>Tuple(str)</code></p> `}}),Ct=new E({props:{name:"tokenize",anchor:"transformers.PreTrainedTokenizerBase.tokenize",parameters:[{name:"text",val:": str"},{name:"pair",val:": typing.Optional[str] = None"},{name:"add_special_tokens",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.tokenize.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.pair",description:`<strong>pair</strong> (<code>str</code>, <em>optional</em>) &#x2014; A second sequence to be encoded with the first.`,name:"pair"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the special tokens associated with the corresponding model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific encode method. 
See details in <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>`,name:"kwargs"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2200",returnDescription:` <p>The list of tokens.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Wt=new E({props:{name:"truncate_sequences",anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences",parameters:[{name:"ids",val:": typing.List[int]"},{name:"pair_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"num_tokens_to_remove",val:": int = 0"},{name:"truncation_strategy",val:": typing.Union[str, transformers.tokenization_utils_base.TruncationStrategy] = 'longest_first'"},{name:"stride",val:": int = 0"}],parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.ids",description:`<strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids",description:`<strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"pair_ids"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove",description:`<strong>num_tokens_to_remove</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of tokens to remove using the truncation strategy.`,name:"num_tokens_to_remove"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy",description:`<strong>truncation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; The strategy to follow for truncation. Can be:</p> <ul> <li><code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation_strategy"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens.`,name:"stride"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3159",returnDescription:` <p>The truncated <code>ids</code>, the truncated <code>pair_ids</code> and the list of overflowing tokens. Note: The <em>longest_first</em> strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided.</p> `,returnType:` <p><code>Tuple[List[int], List[int], List[int]]</code></p> `}}),jt=new vs({}),Rt=new E({props:{name:"class transformers.SpecialTokensMixin",anchor:"transformers.SpecialTokensMixin",parameters:[{name:"verbose",val:" = True"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"transformers.SpecialTokensMixin.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence.`,name:"bos_token"},{anchor:"transformers.SpecialTokensMixin.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence.`,name:"eos_token"},{anchor:"transformers.SpecialTokensMixin.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token.`,name:"unk_token"},{anchor:"transformers.SpecialTokensMixin.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance).`,name:"sep_token"},{anchor:"transformers.SpecialTokensMixin.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. 
Will then be ignored by attention mechanisms or loss computation.`,name:"pad_token"},{anchor:"transformers.SpecialTokensMixin.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance).`,name:"cls_token"},{anchor:"transformers.SpecialTokensMixin.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT).`,name:"mask_token"},{anchor:"transformers.SpecialTokensMixin.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens.`,name:"additional_special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L763"}}),Ut=new E({props:{name:"add_special_tokens",anchor:"transformers.SpecialTokensMixin.add_special_tokens",parameters:[{name:"special_tokens_dict",val:": typing.Dict[str, typing.Union[str, tokenizers.AddedToken]]"}],parametersDescription:[{anchor:"transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict",description:`<strong>special_tokens_dict</strong> (dictionary <em>str</em> to <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Keys should be in the list of predefined special attributes: [<code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>].</p> <p>Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the <code>unk_token</code> to them).`,name:"special_tokens_dict"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L843",returnDescription:` <p>Number of tokens added to the vocabulary.</p> `,returnType:` <p><code>int</code></p> `}}),Ze=new Ts({props:{anchor:"transformers.SpecialTokensMixin.add_special_tokens.example",$$slots:{default:[ih]},$$scope:{ctx:q}}}),Xt=new E({props:{name:"add_tokens",anchor:"transformers.SpecialTokensMixin.add_tokens",parameters:[{name:"new_tokens",val:": typing.Union[str, tokenizers.AddedToken, typing.List[typing.Union[str, tokenizers.AddedToken]]]"},{name:"special_tokens",val:": bool = False"}],parametersDescription:[{anchor:"transformers.SpecialTokensMixin.add_tokens.new_tokens",description:`<strong>new_tokens</strong> (<code>str</code>, <code>tokenizers.AddedToken</code> or a list of <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Tokens are only added if they are not already in the vocabulary. <code>tokenizers.AddedToken</code> wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc.`,name:"new_tokens"},{anchor:"transformers.SpecialTokensMixin.add_tokens.special_tokens",description:`<strong>special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Can be used to specify if the token is a special token. 
This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance).</p> <p>See details for <code>tokenizers.AddedToken</code> in HuggingFace tokenizers library.`,name:"special_tokens"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L915",returnDescription:` <p>Number of tokens added to the vocabulary.</p> `,returnType:` <p><code>int</code></p> `}}),et=new Ts({props:{anchor:"transformers.SpecialTokensMixin.add_tokens.example",$$slots:{default:[dh]},$$scope:{ctx:q}}}),Jt=new E({props:{name:"sanitize_special_tokens",anchor:"transformers.SpecialTokensMixin.sanitize_special_tokens",parameters:[],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L831",returnDescription:` <p>The number of tokens added in the vocabulary during the operation.</p> `,returnType:` <p><code>int</code></p> `}}),Kt=new vs({}),Qt=new E({props:{name:"class transformers.tokenization_utils_base.TruncationStrategy",anchor:"transformers.tokenization_utils_base.TruncationStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L121"}}),Zt=new E({props:{name:"class transformers.CharSpan",anchor:"transformers.CharSpan",parameters:[{name:"start",val:": int"},{name:"end",val:": int"}],parametersDescription:[{anchor:"transformers.CharSpan.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; Index of the first character in the original string.",name:"start"},{anchor:"transformers.CharSpan.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; Index of the character following the last character in the original string.",name:"end"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L133"}}),eo=new E({props:{name:"class transformers.TokenSpan",anchor:"transformers.TokenSpan",parameters:[{name:"start",val:": int"},{name:"end",val:": int"}],parametersDescription:[{anchor:"transformers.TokenSpan.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; Index of the first token in the span.",name:"start"},{anchor:"transformers.TokenSpan.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; Index of the token following the last token in the span.",name:"end"}],source:"https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L146"}}),{c(){p=r("meta"),$=d(),f=r("h1"),h=r("a"),x=r("span"),k(c.$$.fragment),_=d(),L=r("span"),ge=o("Utilities for Tokenizers"),j=d(),D=r("p"),ro=o(`This page lists all the utility functions used by the tokenizers, mainly the class `),so=r("a"),ws=o("PreTrainedTokenizerBase"),xs=o(` that implements the common methods between `),ao=r("a"),$s=o("PreTrainedTokenizer"),Ps=o(" and "),io=r("a"),Es=o("PreTrainedTokenizerFast"),qs=o(` and the mixin `),lo=r("a"),Bs=o("SpecialTokensMixin"),Ls=o("."),Dr=d(),co=r("p"),Ds=o("Most of those are only useful if you are studying the code of the tokenizers in the library."),Ir=d(),ke=r("h2"),Ie=r("a"),Oo=r("span"),k(at.$$.fragment),Is=d(),Wo=r("span"),Ns=o("PreTrainedTokenizerBase"),Nr=d(),u=r("div"),k(it.$$.fragment),As=d(),be=r("p"),Fs=o("Base class for "),po=r("a"),Ss=o("PreTrainedTokenizer"),Cs=o(" and 
"),mo=r("a"),Os=o("PreTrainedTokenizerFast"),Ws=o("."),js=d(),jo=r("p"),Rs=o("Handles shared (mostly boiler plate) methods for those two classes."),Us=d(),Ro=r("p"),Ms=o("Class attributes (overridden by derived classes)"),Vs=d(),I=r("ul"),Z=r("li"),Uo=r("strong"),Gs=o("vocab_files_names"),Hs=o(" ("),Mo=r("code"),Xs=o("Dict[str, str]"),Ys=o(") \u2014 A dictionary with, as keys, the "),Vo=r("code"),Js=o("__init__"),Ks=o(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Qs=d(),F=r("li"),Go=r("strong"),Zs=o("pretrained_vocab_files_map"),ea=o(" ("),Ho=r("code"),ta=o("Dict[str, Dict[str, str]]"),oa=o(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Xo=r("code"),na=o("__init__"),ra=o(` keyword name of each vocabulary file required by the model, the low-level being the `),Yo=r("code"),sa=o("short-cut-names"),aa=o(" of the pretrained models with, as associated values, the "),Jo=r("code"),ia=o("url"),da=o(` to the associated pretrained vocabulary file.`),la=d(),V=r("li"),Ko=r("strong"),ca=o("max_model_input_sizes"),pa=o(" ("),Qo=r("code"),ma=o("Dict[str, Optional[int]]"),ha=o(") \u2014 A dictionary with, as keys, the "),Zo=r("code"),ua=o("short-cut-names"),fa=o(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),en=r("code"),_a=o("None"),ga=o(" if the model has no maximum input size."),ka=d(),S=r("li"),tn=r("strong"),ba=o("pretrained_init_configuration"),va=o(" ("),on=r("code"),Ta=o("Dict[str, Dict[str, Any]]"),ya=o(`) \u2014 A dictionary with, as keys, the `),nn=r("code"),za=o("short-cut-names"),wa=o(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),rn=r("code"),xa=o("__init__"),$a=o(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),ho=r("a"),Pa=o("from_pretrained()"),Ea=o(" method."),qa=d(),Ne=r("li"),sn=r("strong"),Ba=o("model_input_names"),La=o(" ("),an=r("code"),Da=o("List[str]"),Ia=o(") \u2014 A list of inputs expected in the forward pass of the model."),Na=d(),G=r("li"),dn=r("strong"),Aa=o("padding_side"),Fa=o(" ("),ln=r("code"),Sa=o("str"),Ca=o(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),cn=r("code"),Oa=o("'right'"),Wa=o(" or "),pn=r("code"),ja=o("'left'"),Ra=o("."),Ua=d(),H=r("li"),mn=r("strong"),Ma=o("truncation_side"),Va=o(" ("),hn=r("code"),Ga=o("str"),Ha=o(`) \u2014 The default value for the side on which the model should have truncation applied. Should be `),un=r("code"),Xa=o("'right'"),Ya=o(" or "),fn=r("code"),Ja=o("'left'"),Ka=o("."),Qa=d(),Ae=r("div"),k(dt.$$.fragment),Za=d(),_n=r("p"),ei=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),ti=d(),Fe=r("div"),k(lt.$$.fragment),oi=d(),gn=r("p"),ni=o(`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),ri=d(),Se=r("div"),k(ct.$$.fragment),si=d(),kn=r("p"),ai=o("Convert a list of lists of token ids into a list of strings by calling decode."),ii=d(),ee=r("div"),k(pt.$$.fragment),di=d(),bn=r("p"),li=o("Tokenize and prepare for the model a list of sequences or a list of pairs of sequences."),ci=d(),k(Ce.$$.fragment),pi=d(),te=r("div"),k(mt.$$.fragment),mi=d(),vn=r("p"),hi=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),ui=d(),Tn=r("p"),fi=o("This implementation does not add special tokens and this method should be overridden in a subclass."),_i=d(),Oe=r("div"),k(ht.$$.fragment),gi=d(),yn=r("p"),ki=o("Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms."),bi=d(),We=r("div"),k(ut.$$.fragment),vi=d(),ft=r("p"),Ti=o("Converts a sequence of tokens in a single string. The most simple way to do it is "),zn=r("code"),yi=o('" ".join(tokens)'),zi=o(` but we often want to remove sub-word tokenization artifacts at the same time.`),wi=d(),oe=r("div"),k(_t.$$.fragment),xi=d(),uo=r("p"),$i=o("Create the token type IDs corresponding to the sequences passed. "),fo=r("a"),Pi=o(`What are token type IDs?`),Ei=d(),wn=r("p"),qi=o("Should be overridden in a subclass if the model has a special way of building those."),Bi=d(),ne=r("div"),k(gt.$$.fragment),Li=d(),xn=r("p"),Di=o(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Ii=d(),kt=r("p"),Ni=o("Similar to doing "),$n=r("code"),Ai=o("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Fi=o("."),Si=d(),re=r("div"),k(bt.$$.fragment),Ci=d(),Pn=r("p"),Oi=o("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Wi=d(),vt=r("p"),ji=o("Same as doing "),En=r("code"),Ri=o("self.convert_tokens_to_ids(self.tokenize(text))"),Ui=o("."),Mi=d(),se=r("div"),k(Tt.$$.fragment),Vi=d(),qn=r("p"),Gi=o("Tokenize and prepare for the model a sequence or a pair of sequences."),Hi=d(),k(je.$$.fragment),Xi=d(),X=r("div"),k(yt.$$.fragment),Yi=d(),zt=r("p"),Ji=o("Instantiate a "),_o=r("a"),Ki=o("PreTrainedTokenizerBase"),Qi=o(` (or a derived class) from a predefined tokenizer.`),Zi=d(),k(Re.$$.fragment),ed=d(),k(Ue.$$.fragment),td=d(),Me=r("div"),k(wt.$$.fragment),od=d(),ve=r("p"),nd=o(`Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Bn=r("code"),rd=o("prepare_for_model"),sd=o(" or "),Ln=r("code"),ad=o("encode_plus"),id=o(" methods."),dd=d(),ae=r("div"),k(xt.$$.fragment),ld=d(),Dn=r("p"),cd=o("Returns the vocabulary as a dictionary of token to index."),pd=d(),ie=r("p"),In=r("code"),md=o("tokenizer.get_vocab()[token]"),hd=o(" is equivalent to "),Nn=r("code"),ud=o("tokenizer.convert_tokens_to_ids(token)"),fd=o(" when "),An=r("code"),_d=o("token"),gd=o(` is in the vocab.`),kd=d(),C=r("div"),k($t.$$.fragment),bd=d(),Fn=r("p"),vd=o(`Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.`),Td=d(),J=r("p"),yd=o("Padding side (left/right) padding token ids are defined at the tokenizer level (with "),Sn=r("code"),zd=o("self.padding_side"),wd=o(`, `),Cn=r("code"),xd=o("self.pad_token_id"),$d=o(" and "),On=r("code"),Pd=o("self.pad_token_type_id"),Ed=o(")."),qd=d(),Te=r("p"),Bd=o("Please note that with a fast tokenizer, using the "),Wn=r("code"),Ld=o("__call__"),Dd=o(` method is faster than using a method to encode the text followed by a call to the `),jn=r("code"),Id=o("pad"),Nd=o(" method to get a padded encoding."),Ad=d(),k(Ve.$$.fragment),Fd=d(),Ge=r("div"),k(Pt.$$.fragment),Sd=d(),R=r("p"),Cd=o(`Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for `),Rn=r("em"),Od=o("pair_ids"),Wd=o(` different than `),Un=r("code"),jd=o("None"),Rd=o(" and "),Mn=r("em"),Ud=o("truncation_strategy = longest_first"),Md=o(" or "),Vn=r("code"),Vd=o("True"),Gd=o(`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error.`),Hd=d(),He=r("div"),k(Et.$$.fragment),Xd=d(),Gn=r("p"),Yd=o("Prepare model inputs for translation. For best performance, translate one sentence at a time."),Jd=d(),de=r("div"),k(qt.$$.fragment),Kd=d(),Bt=r("p"),Qd=o(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Hn=r("code"),Zd=o("repo_path_or_name"),el=o("."),tl=d(),k(Xe.$$.fragment),ol=d(),le=r("div"),k(Lt.$$.fragment),nl=d(),Dt=r("p"),rl=o(`Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `),Xn=r("code"),sl=o("AutoTokenizer"),al=o("."),il=d(),k(Ye.$$.fragment),dl=d(),Y=r("div"),k(It.$$.fragment),ll=d(),Yn=r("p"),cl=o("Save the full tokenizer state."),pl=d(),Nt=r("p"),ml=o(`This method make sure the full tokenizer can then be re-loaded using the `),Jn=r("code"),hl=o("~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),ul=o(" class method.."),fl=d(),At=r("p"),_l=o(`Warning,None This won\u2019t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying `),Kn=r("code"),gl=o("tokenizer.do_lower_case"),kl=o(" after creation)."),bl=d(),ce=r("div"),k(Ft.$$.fragment),vl=d(),Qn=r("p"),Tl=o("Save only the vocabulary of the tokenizer (vocabulary + added tokens)."),yl=d(),St=r("p"),zl=o(`This method won\u2019t save the configuration and special token mappings of the tokenizer. 
Use `),Zn=r("code"),wl=o("_save_pretrained()"),xl=o(" to save the whole state of the tokenizer."),$l=d(),Je=r("div"),k(Ct.$$.fragment),Pl=d(),Ot=r("p"),El=o("Converts a string in a sequence of tokens, replacing unknown tokens with the "),er=r("code"),ql=o("unk_token"),Bl=o("."),Ll=d(),Ke=r("div"),k(Wt.$$.fragment),Dl=d(),tr=r("p"),Il=o("Truncates a sequence pair in-place following the strategy."),Ar=d(),ye=r("h2"),Qe=r("a"),or=r("span"),k(jt.$$.fragment),Nl=d(),nr=r("span"),Al=o("SpecialTokensMixin"),Fr=d(),A=r("div"),k(Rt.$$.fragment),Fl=d(),ze=r("p"),Sl=o("A mixin derived by "),go=r("a"),Cl=o("PreTrainedTokenizer"),Ol=o(" and "),ko=r("a"),Wl=o("PreTrainedTokenizerFast"),jl=o(` to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens.`),Rl=d(),B=r("div"),k(Ut.$$.fragment),Ul=d(),rr=r("p"),Ml=o(`Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).`),Vl=d(),sr=r("p"),Gl=o(`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Hl=d(),Mt=r("p"),Xl=o("In order to do that, please use the "),bo=r("a"),Yl=o("resize_token_embeddings()"),Jl=o(" method."),Kl=d(),Vt=r("p"),Ql=o("Using "),ar=r("code"),Zl=o("add_special_tokens"),ec=o(" will ensure your special tokens can be used in several ways:"),tc=d(),Gt=r("ul"),ir=r("li"),oc=o("Special tokens are carefully handled by the tokenizer (they are never split)."),nc=d(),Ht=r("li"),rc=o("You can easily refer to special tokens using tokenizer class attributes like "),dr=r("code"),sc=o("tokenizer.cls_token"),ac=o(`. This makes it easy to develop model-agnostic training and fine-tuning scripts.`),ic=d(),U=r("p"),dc=o(`When possible, special tokens are already registered for provided pretrained models (for instance `),vo=r("a"),lc=o("BertTokenizer"),cc=d(),lr=r("code"),pc=o("cls_token"),mc=o(" is already registered to be :obj"),cr=r("em"),hc=o("\u2019[CLS]\u2019"),uc=o(` and XLM\u2019s one is also registered to be `),pr=r("code"),fc=o("'</s>'"),_c=o(")."),gc=d(),k(Ze.$$.fragment),kc=d(),O=r("div"),k(Xt.$$.fragment),bc=d(),mr=r("p"),vc=o(`Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way.`),Tc=d(),hr=r("p"),yc=o(`Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),zc=d(),Yt=r("p"),wc=o("In order to do that, please use the "),To=r("a"),xc=o("resize_token_embeddings()"),$c=o(" method."),Pc=d(),k(et.$$.fragment),Ec=d(),pe=r("div"),k(Jt.$$.fragment),qc=d(),we=r("p"),Bc=o("Make sure that all the special tokens attributes of the tokenizer ("),ur=r("code"),Lc=o("tokenizer.mask_token"),Dc=o(`, `),fr=r("code"),Ic=o("tokenizer.cls_token"),Nc=o(", etc.) 
are in the vocabulary."),Ac=d(),_r=r("p"),Fc=o("Add the missing ones to the vocabulary if needed."),Sr=d(),xe=r("h2"),tt=r("a"),gr=r("span"),k(Kt.$$.fragment),Sc=d(),kr=r("span"),Cc=o("Enums and namedtuples"),Cr=d(),$e=r("div"),k(Qt.$$.fragment),Oc=d(),Pe=r("p"),Wc=o("Possible values for the "),br=r("code"),jc=o("truncation"),Rc=o(" argument in "),ot=r("a"),Uc=o("PreTrainedTokenizerBase."),vr=r("strong"),Mc=o("call"),Vc=o("()"),Gc=o(`. Useful for tab-completion in an IDE.`),Or=d(),Ee=r("div"),k(Zt.$$.fragment),Hc=d(),Tr=r("p"),Xc=o("Character span in the original string."),Wr=d(),qe=r("div"),k(eo.$$.fragment),Yc=d(),yr=r("p"),Jc=o("Token span in an encoded string (list of tokens)."),this.h()},l(i){const w=Qm('[data-svelte="svelte-1phssyn"]',document.head);p=s(w,"META",{name:!0,content:!0}),w.forEach(t),$=l(i),f=s(i,"H1",{class:!0});var to=a(f);h=s(to,"A",{id:!0,class:!0,href:!0});var zr=a(h);x=s(zr,"SPAN",{});var wr=a(x);b(c.$$.fragment,wr),wr.forEach(t),zr.forEach(t),_=l(to),L=s(to,"SPAN",{});var xr=a(L);ge=n(xr,"Utilities for Tokenizers"),xr.forEach(t),to.forEach(t),j=l(i),D=s(i,"P",{});var M=a(D);ro=n(M,`This page lists all the utility functions used by the tokenizers, mainly the class `),so=s(M,"A",{href:!0});var $r=a(so);ws=n($r,"PreTrainedTokenizerBase"),$r.forEach(t),xs=n(M,` that implements the common methods between `),ao=s(M,"A",{href:!0});var Pr=a(ao);$s=n(Pr,"PreTrainedTokenizer"),Pr.forEach(t),Ps=n(M," and "),io=s(M,"A",{href:!0});var Er=a(io);Es=n(Er,"PreTrainedTokenizerFast"),Er.forEach(t),qs=n(M,` and the mixin `),lo=s(M,"A",{href:!0});var qr=a(lo);Bs=n(qr,"SpecialTokensMixin"),qr.forEach(t),Ls=n(M,"."),M.forEach(t),Dr=l(i),co=s(i,"P",{});var Qc=a(co);Ds=n(Qc,"Most of those are only useful if you are studying the code of the tokenizers in the library."),Qc.forEach(t),Ir=l(i),ke=s(i,"H2",{class:!0});var Rr=a(ke);Ie=s(Rr,"A",{id:!0,class:!0,href:!0});var Zc=a(Ie);Oo=s(Zc,"SPAN",{});var ep=a(Oo);b(at.$$.fragment,ep),ep.forEach(t),Zc.forEach(t),Is=l(Rr),Wo=s(Rr,"SPAN",{});var tp=a(Wo);Ns=n(tp,"PreTrainedTokenizerBase"),tp.forEach(t),Rr.forEach(t),Nr=l(i),u=s(i,"DIV",{class:!0});var g=a(u);b(it.$$.fragment,g),As=l(g),be=s(g,"P",{});var yo=a(be);Fs=n(yo,"Base class for "),po=s(yo,"A",{href:!0});var op=a(po);Ss=n(op,"PreTrainedTokenizer"),op.forEach(t),Cs=n(yo," and "),mo=s(yo,"A",{href:!0});var np=a(mo);Os=n(np,"PreTrainedTokenizerFast"),np.forEach(t),Ws=n(yo,"."),yo.forEach(t),js=l(g),jo=s(g,"P",{});var rp=a(jo);Rs=n(rp,"Handles shared (mostly boiler plate) methods for those two classes."),rp.forEach(t),Us=l(g),Ro=s(g,"P",{});var sp=a(Ro);Ms=n(sp,"Class attributes (overridden by derived classes)"),sp.forEach(t),Vs=l(g),I=s(g,"UL",{});var W=a(I);Z=s(W,"LI",{});var oo=a(Z);Uo=s(oo,"STRONG",{});var ap=a(Uo);Gs=n(ap,"vocab_files_names"),ap.forEach(t),Hs=n(oo," ("),Mo=s(oo,"CODE",{});var ip=a(Mo);Xs=n(ip,"Dict[str, str]"),ip.forEach(t),Ys=n(oo,") \u2014 A dictionary with, as keys, the "),Vo=s(oo,"CODE",{});var dp=a(Vo);Js=n(dp,"__init__"),dp.forEach(t),Ks=n(oo,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),oo.forEach(t),Qs=l(W),F=s(W,"LI",{});var K=a(F);Go=s(K,"STRONG",{});var lp=a(Go);Zs=n(lp,"pretrained_vocab_files_map"),lp.forEach(t),ea=n(K," ("),Ho=s(K,"CODE",{});var cp=a(Ho);ta=n(cp,"Dict[str, Dict[str, str]]"),cp.forEach(t),oa=n(K,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Xo=s(K,"CODE",{});var pp=a(Xo);na=n(pp,"__init__"),pp.forEach(t),ra=n(K,` 
keyword name of each vocabulary file required by the model, the low-level being the `),Yo=s(K,"CODE",{});var mp=a(Yo);sa=n(mp,"short-cut-names"),mp.forEach(t),aa=n(K," of the pretrained models with, as associated values, the "),Jo=s(K,"CODE",{});var hp=a(Jo);ia=n(hp,"url"),hp.forEach(t),da=n(K,` to the associated pretrained vocabulary file.`),K.forEach(t),la=l(W),V=s(W,"LI",{});var Be=a(V);Ko=s(Be,"STRONG",{});var up=a(Ko);ca=n(up,"max_model_input_sizes"),up.forEach(t),pa=n(Be," ("),Qo=s(Be,"CODE",{});var fp=a(Qo);ma=n(fp,"Dict[str, Optional[int]]"),fp.forEach(t),ha=n(Be,") \u2014 A dictionary with, as keys, the "),Zo=s(Be,"CODE",{});var _p=a(Zo);ua=n(_p,"short-cut-names"),_p.forEach(t),fa=n(Be,` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),en=s(Be,"CODE",{});var gp=a(en);_a=n(gp,"None"),gp.forEach(t),ga=n(Be," if the model has no maximum input size."),Be.forEach(t),ka=l(W),S=s(W,"LI",{});var Q=a(S);tn=s(Q,"STRONG",{});var kp=a(tn);ba=n(kp,"pretrained_init_configuration"),kp.forEach(t),va=n(Q," ("),on=s(Q,"CODE",{});var bp=a(on);Ta=n(bp,"Dict[str, Dict[str, Any]]"),bp.forEach(t),ya=n(Q,`) \u2014 A dictionary with, as keys, the `),nn=s(Q,"CODE",{});var vp=a(nn);za=n(vp,"short-cut-names"),vp.forEach(t),wa=n(Q,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),rn=s(Q,"CODE",{});var Tp=a(rn);xa=n(Tp,"__init__"),Tp.forEach(t),$a=n(Q,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),ho=s(Q,"A",{href:!0});var yp=a(ho);Pa=n(yp,"from_pretrained()"),yp.forEach(t),Ea=n(Q," method."),Q.forEach(t),qa=l(W),Ne=s(W,"LI",{});var Br=a(Ne);sn=s(Br,"STRONG",{});var zp=a(sn);Ba=n(zp,"model_input_names"),zp.forEach(t),La=n(Br," ("),an=s(Br,"CODE",{});var wp=a(an);Da=n(wp,"List[str]"),wp.forEach(t),Ia=n(Br,") \u2014 A list of inputs expected in the forward pass of the model."),Br.forEach(t),Na=l(W),G=s(W,"LI",{});var Le=a(G);dn=s(Le,"STRONG",{});var xp=a(dn);Aa=n(xp,"padding_side"),xp.forEach(t),Fa=n(Le," ("),ln=s(Le,"CODE",{});var $p=a(ln);Sa=n($p,"str"),$p.forEach(t),Ca=n(Le,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),cn=s(Le,"CODE",{});var Pp=a(cn);Oa=n(Pp,"'right'"),Pp.forEach(t),Wa=n(Le," or "),pn=s(Le,"CODE",{});var Ep=a(pn);ja=n(Ep,"'left'"),Ep.forEach(t),Ra=n(Le,"."),Le.forEach(t),Ua=l(W),H=s(W,"LI",{});var De=a(H);mn=s(De,"STRONG",{});var qp=a(mn);Ma=n(qp,"truncation_side"),qp.forEach(t),Va=n(De," ("),hn=s(De,"CODE",{});var Bp=a(hn);Ga=n(Bp,"str"),Bp.forEach(t),Ha=n(De,`) \u2014 The default value for the side on which the model should have truncation applied. Should be `),un=s(De,"CODE",{});var Lp=a(un);Xa=n(Lp,"'right'"),Lp.forEach(t),Ya=n(De," or "),fn=s(De,"CODE",{});var Dp=a(fn);Ja=n(Dp,"'left'"),Dp.forEach(t),Ka=n(De,"."),De.forEach(t),W.forEach(t),Qa=l(g),Ae=s(g,"DIV",{class:!0});var Ur=a(Ae);b(dt.$$.fragment,Ur),Za=l(Ur),_n=s(Ur,"P",{});var Ip=a(_n);ei=n(Ip,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Ip.forEach(t),Ur.forEach(t),ti=l(g),Fe=s(g,"DIV",{class:!0});var Mr=a(Fe);b(lt.$$.fragment,Mr),oi=l(Mr),gn=s(Mr,"P",{});var Np=a(gn);ni=n(Np,`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Np.forEach(t),Mr.forEach(t),ri=l(g),Se=s(g,"DIV",{class:!0});var Vr=a(Se);b(ct.$$.fragment,Vr),si=l(Vr),kn=s(Vr,"P",{});var Ap=a(kn);ai=n(Ap,"Convert a list of lists of token ids into a list of strings by calling decode."),Ap.forEach(t),Vr.forEach(t),ii=l(g),ee=s(g,"DIV",{class:!0});var zo=a(ee);b(pt.$$.fragment,zo),di=l(zo),bn=s(zo,"P",{});var Fp=a(bn);li=n(Fp,"Tokenize and prepare for the model a list of sequences or a list of pairs of sequences."),Fp.forEach(t),ci=l(zo),b(Ce.$$.fragment,zo),zo.forEach(t),pi=l(g),te=s(g,"DIV",{class:!0});var wo=a(te);b(mt.$$.fragment,wo),mi=l(wo),vn=s(wo,"P",{});var Sp=a(vn);hi=n(Sp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),Sp.forEach(t),ui=l(wo),Tn=s(wo,"P",{});var Cp=a(Tn);fi=n(Cp,"This implementation does not add special tokens and this method should be overridden in a subclass."),Cp.forEach(t),wo.forEach(t),_i=l(g),Oe=s(g,"DIV",{class:!0});var Gr=a(Oe);b(ht.$$.fragment,Gr),gi=l(Gr),yn=s(Gr,"P",{});var Op=a(yn);ki=n(Op,"Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms."),Op.forEach(t),Gr.forEach(t),bi=l(g),We=s(g,"DIV",{class:!0});var Hr=a(We);b(ut.$$.fragment,Hr),vi=l(Hr),ft=s(Hr,"P",{});var Xr=a(ft);Ti=n(Xr,"Converts a sequence of tokens in a single string. The most simple way to do it is "),zn=s(Xr,"CODE",{});var Wp=a(zn);yi=n(Wp,'" ".join(tokens)'),Wp.forEach(t),zi=n(Xr,` but we often want to remove sub-word tokenization artifacts at the same time.`),Xr.forEach(t),Hr.forEach(t),wi=l(g),oe=s(g,"DIV",{class:!0});var xo=a(oe);b(_t.$$.fragment,xo),xi=l(xo),uo=s(xo,"P",{});var Kc=a(uo);$i=n(Kc,"Create the token type IDs corresponding to the sequences passed. 
"),fo=s(Kc,"A",{href:!0});var jp=a(fo);Pi=n(jp,`What are token type IDs?`),jp.forEach(t),Kc.forEach(t),Ei=l(xo),wn=s(xo,"P",{});var Rp=a(wn);qi=n(Rp,"Should be overridden in a subclass if the model has a special way of building those."),Rp.forEach(t),xo.forEach(t),Bi=l(g),ne=s(g,"DIV",{class:!0});var $o=a(ne);b(gt.$$.fragment,$o),Li=l($o),xn=s($o,"P",{});var Up=a(xn);Di=n(Up,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Up.forEach(t),Ii=l($o),kt=s($o,"P",{});var Yr=a(kt);Ni=n(Yr,"Similar to doing "),$n=s(Yr,"CODE",{});var Mp=a($n);Ai=n(Mp,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Mp.forEach(t),Fi=n(Yr,"."),Yr.forEach(t),$o.forEach(t),Si=l(g),re=s(g,"DIV",{class:!0});var Po=a(re);b(bt.$$.fragment,Po),Ci=l(Po),Pn=s(Po,"P",{});var Vp=a(Pn);Oi=n(Vp,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Vp.forEach(t),Wi=l(Po),vt=s(Po,"P",{});var Jr=a(vt);ji=n(Jr,"Same as doing "),En=s(Jr,"CODE",{});var Gp=a(En);Ri=n(Gp,"self.convert_tokens_to_ids(self.tokenize(text))"),Gp.forEach(t),Ui=n(Jr,"."),Jr.forEach(t),Po.forEach(t),Mi=l(g),se=s(g,"DIV",{class:!0});var Eo=a(se);b(Tt.$$.fragment,Eo),Vi=l(Eo),qn=s(Eo,"P",{});var Hp=a(qn);Gi=n(Hp,"Tokenize and prepare for the model a sequence or a pair of sequences."),Hp.forEach(t),Hi=l(Eo),b(je.$$.fragment,Eo),Eo.forEach(t),Xi=l(g),X=s(g,"DIV",{class:!0});var nt=a(X);b(yt.$$.fragment,nt),Yi=l(nt),zt=s(nt,"P",{});var Kr=a(zt);Ji=n(Kr,"Instantiate a "),_o=s(Kr,"A",{href:!0});var Xp=a(_o);Ki=n(Xp,"PreTrainedTokenizerBase"),Xp.forEach(t),Qi=n(Kr,` (or a derived class) from a predefined tokenizer.`),Kr.forEach(t),Zi=l(nt),b(Re.$$.fragment,nt),ed=l(nt),b(Ue.$$.fragment,nt),nt.forEach(t),td=l(g),Me=s(g,"DIV",{class:!0});var Qr=a(Me);b(wt.$$.fragment,Qr),od=l(Qr),ve=s(Qr,"P",{});var qo=a(ve);nd=n(qo,`Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),Bn=s(qo,"CODE",{});var Yp=a(Bn);rd=n(Yp,"prepare_for_model"),Yp.forEach(t),sd=n(qo," or "),Ln=s(qo,"CODE",{});var Jp=a(Ln);ad=n(Jp,"encode_plus"),Jp.forEach(t),id=n(qo," methods."),qo.forEach(t),Qr.forEach(t),dd=l(g),ae=s(g,"DIV",{class:!0});var Bo=a(ae);b(xt.$$.fragment,Bo),ld=l(Bo),Dn=s(Bo,"P",{});var Kp=a(Dn);cd=n(Kp,"Returns the vocabulary as a dictionary of token to index."),Kp.forEach(t),pd=l(Bo),ie=s(Bo,"P",{});var no=a(ie);In=s(no,"CODE",{});var Qp=a(In);md=n(Qp,"tokenizer.get_vocab()[token]"),Qp.forEach(t),hd=n(no," is equivalent to "),Nn=s(no,"CODE",{});var Zp=a(Nn);ud=n(Zp,"tokenizer.convert_tokens_to_ids(token)"),Zp.forEach(t),fd=n(no," when "),An=s(no,"CODE",{});var em=a(An);_d=n(em,"token"),em.forEach(t),gd=n(no,` is in the vocab.`),no.forEach(t),Bo.forEach(t),kd=l(g),C=s(g,"DIV",{class:!0});var me=a(C);b($t.$$.fragment,me),bd=l(me),Fn=s(me,"P",{});var tm=a(Fn);vd=n(tm,`Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.`),tm.forEach(t),Td=l(me),J=s(me,"P",{});var rt=a(J);yd=n(rt,"Padding side (left/right) padding token ids are defined at the tokenizer level (with "),Sn=s(rt,"CODE",{});var om=a(Sn);zd=n(om,"self.padding_side"),om.forEach(t),wd=n(rt,`, `),Cn=s(rt,"CODE",{});var nm=a(Cn);xd=n(nm,"self.pad_token_id"),nm.forEach(t),$d=n(rt," and "),On=s(rt,"CODE",{});var rm=a(On);Pd=n(rm,"self.pad_token_type_id"),rm.forEach(t),Ed=n(rt,")."),rt.forEach(t),qd=l(me),Te=s(me,"P",{});var Lo=a(Te);Bd=n(Lo,"Please note that with a fast tokenizer, using the "),Wn=s(Lo,"CODE",{});var sm=a(Wn);Ld=n(sm,"__call__"),sm.forEach(t),Dd=n(Lo,` method is faster than using a method to encode the text followed by a call to the `),jn=s(Lo,"CODE",{});var am=a(jn);Id=n(am,"pad"),am.forEach(t),Nd=n(Lo," method to get a padded encoding."),Lo.forEach(t),Ad=l(me),b(Ve.$$.fragment,me),me.forEach(t),Fd=l(g),Ge=s(g,"DIV",{class:!0});var Zr=a(Ge);b(Pt.$$.fragment,Zr),Sd=l(Zr),R=s(Zr,"P",{});var he=a(R);Cd=n(he,`Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for `),Rn=s(he,"EM",{});var im=a(Rn);Od=n(im,"pair_ids"),im.forEach(t),Wd=n(he,` different than `),Un=s(he,"CODE",{});var dm=a(Un);jd=n(dm,"None"),dm.forEach(t),Rd=n(he," and "),Mn=s(he,"EM",{});var lm=a(Mn);Ud=n(lm,"truncation_strategy = longest_first"),lm.forEach(t),Md=n(he," or "),Vn=s(he,"CODE",{});var cm=a(Vn);Vd=n(cm,"True"),cm.forEach(t),Gd=n(he,`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error.`),he.forEach(t),Zr.forEach(t),Hd=l(g),He=s(g,"DIV",{class:!0});var es=a(He);b(Et.$$.fragment,es),Xd=l(es),Gn=s(es,"P",{});var pm=a(Gn);Yd=n(pm,"Prepare model inputs for translation. 
For best performance, translate one sentence at a time."),pm.forEach(t),es.forEach(t),Jd=l(g),de=s(g,"DIV",{class:!0});var Do=a(de);b(qt.$$.fragment,Do),Kd=l(Do),Bt=s(Do,"P",{});var ts=a(Bt);Qd=n(ts,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Hn=s(ts,"CODE",{});var mm=a(Hn);Zd=n(mm,"repo_path_or_name"),mm.forEach(t),el=n(ts,"."),ts.forEach(t),tl=l(Do),b(Xe.$$.fragment,Do),Do.forEach(t),ol=l(g),le=s(g,"DIV",{class:!0});var Io=a(le);b(Lt.$$.fragment,Io),nl=l(Io),Dt=s(Io,"P",{});var os=a(Dt);rl=n(os,`Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `),Xn=s(os,"CODE",{});var hm=a(Xn);sl=n(hm,"AutoTokenizer"),hm.forEach(t),al=n(os,"."),os.forEach(t),il=l(Io),b(Ye.$$.fragment,Io),Io.forEach(t),dl=l(g),Y=s(g,"DIV",{class:!0});var st=a(Y);b(It.$$.fragment,st),ll=l(st),Yn=s(st,"P",{});var um=a(Yn);cl=n(um,"Save the full tokenizer state."),um.forEach(t),pl=l(st),Nt=s(st,"P",{});var ns=a(Nt);ml=n(ns,`This method make sure the full tokenizer can then be re-loaded using the `),Jn=s(ns,"CODE",{});var fm=a(Jn);hl=n(fm,"~tokenization_utils_base.PreTrainedTokenizer.from_pretrained"),fm.forEach(t),ul=n(ns," class method.."),ns.forEach(t),fl=l(st),At=s(st,"P",{});var rs=a(At);_l=n(rs,`Warning,None This won\u2019t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying `),Kn=s(rs,"CODE",{});var _m=a(Kn);gl=n(_m,"tokenizer.do_lower_case"),_m.forEach(t),kl=n(rs," after creation)."),rs.forEach(t),st.forEach(t),bl=l(g),ce=s(g,"DIV",{class:!0});var No=a(ce);b(Ft.$$.fragment,No),vl=l(No),Qn=s(No,"P",{});var gm=a(Qn);Tl=n(gm,"Save only the vocabulary of the tokenizer (vocabulary + added tokens)."),gm.forEach(t),yl=l(No),St=s(No,"P",{});var ss=a(St);zl=n(ss,`This method won\u2019t save the configuration and special token mappings of the tokenizer. Use `),Zn=s(ss,"CODE",{});var km=a(Zn);wl=n(km,"_save_pretrained()"),km.forEach(t),xl=n(ss," to save the whole state of the tokenizer."),ss.forEach(t),No.forEach(t),$l=l(g),Je=s(g,"DIV",{class:!0});var as=a(Je);b(Ct.$$.fragment,as),Pl=l(as),Ot=s(as,"P",{});var is=a(Ot);El=n(is,"Converts a string in a sequence of tokens, replacing unknown tokens with the "),er=s(is,"CODE",{});var bm=a(er);ql=n(bm,"unk_token"),bm.forEach(t),Bl=n(is,"."),is.forEach(t),as.forEach(t),Ll=l(g),Ke=s(g,"DIV",{class:!0});var ds=a(Ke);b(Wt.$$.fragment,ds),Dl=l(ds),tr=s(ds,"P",{});var vm=a(tr);Il=n(vm,"Truncates a sequence pair in-place following the strategy."),vm.forEach(t),ds.forEach(t),g.forEach(t),Ar=l(i),ye=s(i,"H2",{class:!0});var ls=a(ye);Qe=s(ls,"A",{id:!0,class:!0,href:!0});var Tm=a(Qe);or=s(Tm,"SPAN",{});var ym=a(or);b(jt.$$.fragment,ym),ym.forEach(t),Tm.forEach(t),Nl=l(ls),nr=s(ls,"SPAN",{});var zm=a(nr);Al=n(zm,"SpecialTokensMixin"),zm.forEach(t),ls.forEach(t),Fr=l(i),A=s(i,"DIV",{class:!0});var ue=a(A);b(Rt.$$.fragment,ue),Fl=l(ue),ze=s(ue,"P",{});var Ao=a(ze);Sl=n(Ao,"A mixin derived by "),go=s(Ao,"A",{href:!0});var wm=a(go);Cl=n(wm,"PreTrainedTokenizer"),wm.forEach(t),Ol=n(Ao," and "),ko=s(Ao,"A",{href:!0});var xm=a(ko);Wl=n(xm,"PreTrainedTokenizerFast"),xm.forEach(t),jl=n(Ao,` to handle specific behaviors related to special tokens. 
In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens.`),Ao.forEach(t),Rl=l(ue),B=s(ue,"DIV",{class:!0});var N=a(B);b(Ut.$$.fragment,N),Ul=l(N),rr=s(N,"P",{});var $m=a(rr);Ml=n($m,`Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).`),$m.forEach(t),Vl=l(N),sr=s(N,"P",{});var Pm=a(sr);Gl=n(Pm,`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Pm.forEach(t),Hl=l(N),Mt=s(N,"P",{});var cs=a(Mt);Xl=n(cs,"In order to do that, please use the "),bo=s(cs,"A",{href:!0});var Em=a(bo);Yl=n(Em,"resize_token_embeddings()"),Em.forEach(t),Jl=n(cs," method."),cs.forEach(t),Kl=l(N),Vt=s(N,"P",{});var ps=a(Vt);Ql=n(ps,"Using "),ar=s(ps,"CODE",{});var qm=a(ar);Zl=n(qm,"add_special_tokens"),qm.forEach(t),ec=n(ps," will ensure your special tokens can be used in several ways:"),ps.forEach(t),tc=l(N),Gt=s(N,"UL",{});var ms=a(Gt);ir=s(ms,"LI",{});var Bm=a(ir);oc=n(Bm,"Special tokens are carefully handled by the tokenizer (they are never split)."),Bm.forEach(t),nc=l(ms),Ht=s(ms,"LI",{});var hs=a(Ht);rc=n(hs,"You can easily refer to special tokens using tokenizer class attributes like "),dr=s(hs,"CODE",{});var Lm=a(dr);sc=n(Lm,"tokenizer.cls_token"),Lm.forEach(t),ac=n(hs,`. This makes it easy to develop model-agnostic training and fine-tuning scripts.`),hs.forEach(t),ms.forEach(t),ic=l(N),U=s(N,"P",{});var fe=a(U);dc=n(fe,`When possible, special tokens are already registered for provided pretrained models (for instance `),vo=s(fe,"A",{href:!0});var Dm=a(vo);lc=n(Dm,"BertTokenizer"),Dm.forEach(t),cc=l(fe),lr=s(fe,"CODE",{});var Im=a(lr);pc=n(Im,"cls_token"),Im.forEach(t),mc=n(fe," is already registered to be :obj"),cr=s(fe,"EM",{});var Nm=a(cr);hc=n(Nm,"\u2019[CLS]\u2019"),Nm.forEach(t),uc=n(fe,` and XLM\u2019s one is also registered to be `),pr=s(fe,"CODE",{});var Am=a(pr);fc=n(Am,"'</s>'"),Am.forEach(t),_c=n(fe,")."),fe.forEach(t),gc=l(N),b(Ze.$$.fragment,N),N.forEach(t),kc=l(ue),O=s(ue,"DIV",{class:!0});var _e=a(O);b(Xt.$$.fragment,_e),bc=l(_e),mr=s(_e,"P",{});var Fm=a(mr);vc=n(Fm,`Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and and will be isolated before the tokenization algorithm is applied. 
Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way.`),Fm.forEach(t),Tc=l(_e),hr=s(_e,"P",{});var Sm=a(hr);yc=n(Sm,`Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Sm.forEach(t),zc=l(_e),Yt=s(_e,"P",{});var us=a(Yt);wc=n(us,"In order to do that, please use the "),To=s(us,"A",{href:!0});var Cm=a(To);xc=n(Cm,"resize_token_embeddings()"),Cm.forEach(t),$c=n(us," method."),us.forEach(t),Pc=l(_e),b(et.$$.fragment,_e),_e.forEach(t),Ec=l(ue),pe=s(ue,"DIV",{class:!0});var Fo=a(pe);b(Jt.$$.fragment,Fo),qc=l(Fo),we=s(Fo,"P",{});var So=a(we);Bc=n(So,"Make sure that all the special tokens attributes of the tokenizer ("),ur=s(So,"CODE",{});var Om=a(ur);Lc=n(Om,"tokenizer.mask_token"),Om.forEach(t),Dc=n(So,`, `),fr=s(So,"CODE",{});var Wm=a(fr);Ic=n(Wm,"tokenizer.cls_token"),Wm.forEach(t),Nc=n(So,", etc.) are in the vocabulary."),So.forEach(t),Ac=l(Fo),_r=s(Fo,"P",{});var jm=a(_r);Fc=n(jm,"Add the missing ones to the vocabulary if needed."),jm.forEach(t),Fo.forEach(t),ue.forEach(t),Sr=l(i),xe=s(i,"H2",{class:!0});var fs=a(xe);tt=s(fs,"A",{id:!0,class:!0,href:!0});var Rm=a(tt);gr=s(Rm,"SPAN",{});var Um=a(gr);b(Kt.$$.fragment,Um),Um.forEach(t),Rm.forEach(t),Sc=l(fs),kr=s(fs,"SPAN",{});var Mm=a(kr);Cc=n(Mm,"Enums and namedtuples"),Mm.forEach(t),fs.forEach(t),Cr=l(i),$e=s(i,"DIV",{class:!0});var _s=a($e);b(Qt.$$.fragment,_s),Oc=l(_s),Pe=s(_s,"P",{});var Co=a(Pe);Wc=n(Co,"Possible values for the "),br=s(Co,"CODE",{});var Vm=a(br);jc=n(Vm,"truncation"),Vm.forEach(t),Rc=n(Co," argument in "),ot=s(Co,"A",{href:!0});var gs=a(ot);Uc=n(gs,"PreTrainedTokenizerBase."),vr=s(gs,"STRONG",{});var Gm=a(vr);Mc=n(Gm,"call"),Gm.forEach(t),Vc=n(gs,"()"),gs.forEach(t),Gc=n(Co,`. 
Useful for tab-completion in an IDE.`),Co.forEach(t),_s.forEach(t),Or=l(i),Ee=s(i,"DIV",{class:!0});var ks=a(Ee);b(Zt.$$.fragment,ks),Hc=l(ks),Tr=s(ks,"P",{});var Hm=a(Tr);Xc=n(Hm,"Character span in the original string."),Hm.forEach(t),ks.forEach(t),Wr=l(i),qe=s(i,"DIV",{class:!0});var bs=a(qe);b(eo.$$.fragment,bs),Yc=l(bs),yr=s(bs,"P",{});var Xm=a(yr);Jc=n(Xm,"Token span in an encoded string (list of tokens)."),Xm.forEach(t),bs.forEach(t),this.h()},h(){m(p,"name","hf:doc:metadata"),m(p,"content",JSON.stringify(ch)),m(h,"id","utilities-for-tokenizers"),m(h,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(h,"href","#utilities-for-tokenizers"),m(f,"class","relative group"),m(so,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),m(ao,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),m(io,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),m(lo,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.SpecialTokensMixin"),m(Ie,"id","transformers.PreTrainedTokenizerBase"),m(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ie,"href","#transformers.PreTrainedTokenizerBase"),m(ke,"class","relative group"),m(po,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),m(mo,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),m(ho,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),m(Ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(We,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(fo,"href","../glossary#token-type-ids"),m(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(_o,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),m(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),m(Ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(He,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(u,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Qe,"id","transformers.SpecialTokensMixin"),m(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Qe,"href","#transformers.SpecialTokensMixin"),m(ye,"class","relative group"),m(go,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),m(ko,"href","/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),m(bo,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings"),m(vo,"href","/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer"),m(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(To,"href","/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings"),m(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(tt,"id","transformers.tokenization_utils_base.TruncationStrategy"),m(tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(tt,"href","#transformers.tokenization_utils_base.TruncationStrategy"),m(xe,"class","relative group"),m(ot,"href","/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),m($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(Ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),m(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(i,w){e(document.head,p),P(i,$,w),P(i,f,w),e(f,h),e(h,x),v(c,x,null),e(f,_),e(f,L),e(L,ge),P(i,j,w),P(i,D,w),e(D,ro),e(D,so),e(so,ws),e(D,xs),e(D,ao),e(ao,$s),e(D,Ps),e(D,io),e(io,Es),e(D,qs),e(D,lo),e(lo,Bs),e(D,Ls),P(i,Dr,w),P(i,co,w),e(co,Ds),P(i,Ir,w),P(i,ke,w),e(ke,Ie),e(Ie,Oo),v(at,Oo,null),e(ke,Is),e(ke,Wo),e(Wo,Ns),P(i,Nr,w),P(i,u,w),v(it,u,null),e(u,As),e(u,be),e(be,Fs),e(be,po),e(po,Ss),e(be,Cs),e(be,mo),e(mo,Os),e(be,Ws),e(u,js),e(u,jo),e(jo,Rs),e(u,Us),e(u,Ro),e(Ro,Ms),e(u,Vs),e(u,I),e(I,Z),e(Z,Uo),e(Uo,Gs),e(Z,Hs),e(Z,Mo),e(Mo,Xs),e(Z,Ys),e(Z,Vo),e(Vo,Js),e(Z,Ks),e(I,Qs),e(I,F),e(F,Go),e(Go,Zs),e(F,ea),e(F,Ho),e(Ho,ta),e(F,oa),e(F,Xo),e(Xo,na),e(F,ra),e(F,Yo),e(Yo,sa),e(F,aa),e(F,Jo),e(Jo,ia),e(F,da),e(I,la),e(I,V),e(V,Ko),e(Ko,ca),e(V,pa),e(V,Qo),e(Qo,ma),e(V,ha),e(V,Zo),e(Zo,ua),e(V,fa),e(V,en),e(en,_a),e(V,ga),e(I,ka),e(I,S),e(S,tn),e(tn,ba),e(S,va),e(S,on),e(on,Ta),e(S,ya),e(S,nn),e(nn,za),e(S,wa),e(S,rn),e(rn,xa),e(S,$a),e(S,ho),e(ho,Pa),e(S,Ea),e(I,qa),e(I,Ne),e(Ne,sn),e(sn,Ba),e(Ne,La),e(Ne,an),e(an,Da),e(Ne,Ia),e(I,Na),e(I,G),e(G,dn),e(dn,Aa),e(G,Fa),e(G,ln),e(ln,Sa),e(G,Ca),e(G,cn),e(cn,Oa),e(G,Wa),e(G,pn),e(pn,ja),e(G,Ra),e(I,Ua),e(I,H),e(H,mn),e(mn,Ma),e(H,Va),e(H,hn),e(hn,Ga),e(H,Ha),e(H,un),e(un,Xa),e(H,Ya),e(H,fn),e(fn,Ja),e(H,Ka),e(u,Qa),e(u,Ae),v(dt,Ae,null),e(Ae,Za),e(Ae,_n),e(_n,ei),e(u,ti),e(u,Fe),v(lt,Fe,null),e(Fe,oi),e(Fe,gn),e(gn,ni),e(u,ri),e(u,Se),v(ct,Se,null),e(Se,si),e(Se,kn),e(kn,ai),e(u,ii),e(u,ee),v(pt,ee,null),e(ee,di),e(ee,bn),e(bn,li),e(ee,ci),v(Ce,ee,null),e(u,pi),e(u,te),v(mt,te,null),e(te,mi),e(te,vn),e(vn,hi),e(te,ui),e(te,Tn),e(Tn,fi),e(u,_i),e(u,Oe),v(ht,Oe,null),e(Oe,gi),e(Oe,yn),e(yn,ki),e(u,bi),e(u,We),v(ut,We,null),e(We,vi),e(We,ft),e(ft,Ti),e(ft,zn),e(zn,yi),e(ft,zi),e(u,wi),e(u,oe),v(_t,oe,null),e(oe,xi),e(oe,uo),e(uo,$i),e(uo,fo),e(fo,Pi),e(oe,Ei),e(oe,wn),e(wn,qi),e(u,Bi),e(u,ne),v(gt,ne,null),e(ne,Li),e(ne,xn),e(xn,Di),e(ne,Ii),e(ne,kt),e(kt,Ni),e(kt,$n),e($n,Ai),e(kt,Fi),e(u,Si),e(u,re),v(bt,re,null),e(re,Ci),e(re,Pn),e(Pn,Oi),e(re,Wi),e(re,vt),e(vt,ji),e(vt,En),e(En,Ri),e(vt,Ui),e(u,Mi),e(u,se),v(Tt,se,null),e(se,Vi),e(se,qn),e(qn,Gi),e(se,Hi),v(je,se,null),e(u,Xi),e(u,X),v(yt,X,null),e(X,Yi),e(X,zt),e(zt,Ji),e(zt,_o),e(_o,Ki),e(zt,Qi),e(X,Zi),v(Re,X,null),e(X,ed),v(Ue,X,null),e(u,td),e(u,Me),v(wt,Me,null),e(Me,od),e(Me,ve),e(ve,nd),e(ve,Bn),e(Bn,rd),e(ve,sd),e(ve,Ln),e(Ln,ad),e(ve,id),e(u,dd),e(u,ae),v(xt,ae,null),e(ae,ld),e(ae,Dn),e(Dn,cd),e(ae,pd),e(ae,ie),e(ie,In),e(In,md),e(ie,hd),e(ie,Nn),e(Nn,ud),e(ie,fd),e(ie,An),e(An,_d),e(ie,gd),e(u,kd),e(u,C),v($t,C,null),e(C,bd),e(C,Fn),e(Fn,vd),e(C,Td),e(C,J),e(J,yd),e(J,Sn),e(Sn,zd),e(J,wd),e(J,Cn),e(Cn,xd),e(J,$d),e(J,On),e(On,Pd),e(J,Ed),e(C,qd),e(C,Te),e(Te,Bd),e(Te,Wn),e(Wn,Ld),e(Te,Dd),e(Te,jn),e(jn,Id),e(Te,Nd),e(C,Ad),v(Ve,C,null),e(u,Fd),e(u,Ge),v(Pt,Ge,null),e(Ge,Sd),e(Ge,R),e(R,Cd),e(R,Rn),e(Rn,Od),e(R,Wd),e(R,Un),e(Un,jd),e(R,Rd),e(R,Mn),e(Mn,Ud),e(R,Md),e(R,Vn),e(Vn,Vd),e(R,Gd),e(u,Hd),e(u,He),v(Et,He,null),e(He,Xd),e(He,Gn),e(Gn,Yd),e(u,Jd),e(u,de),v(qt,de,null),e(de,Kd),e(de,Bt),e(Bt,Qd),e(Bt,Hn),e(Hn,Zd),e(Bt,el),e(de,tl),v(Xe,de,null),e(u,ol),e(u,le),v(Lt,le,null),e(le,nl),e(le,Dt),e(Dt,rl),e(Dt,Xn),e(Xn,sl),e(Dt,al),e(le,il),v(Ye,le,null),e(u,dl),e(u,Y),v(It,Y,null),e(Y,ll),e(Y,Yn),e(Yn,cl),e(Y,pl),e(Y,Nt),e(Nt,ml),e(Nt,Jn),e(Jn,hl),e(Nt,ul),e(Y,fl),e(Y,At),e(At,_l),e(At,Kn),e(Kn,gl),e(At,kl),e(u,bl),e(u,ce),v(Ft,ce,null),e(ce,vl),e(ce,Qn),e(Qn,Tl),e(ce,yl),e(ce,St),e(St,zl),e(St,Zn),e(Zn,wl),e(St,xl),e(u,$l),e(u,Je),v(Ct,Je,null),e(Je,Pl),e(Je,Ot),e(Ot,E
l),e(Ot,er),e(er,ql),e(Ot,Bl),e(u,Ll),e(u,Ke),v(Wt,Ke,null),e(Ke,Dl),e(Ke,tr),e(tr,Il),P(i,Ar,w),P(i,ye,w),e(ye,Qe),e(Qe,or),v(jt,or,null),e(ye,Nl),e(ye,nr),e(nr,Al),P(i,Fr,w),P(i,A,w),v(Rt,A,null),e(A,Fl),e(A,ze),e(ze,Sl),e(ze,go),e(go,Cl),e(ze,Ol),e(ze,ko),e(ko,Wl),e(ze,jl),e(A,Rl),e(A,B),v(Ut,B,null),e(B,Ul),e(B,rr),e(rr,Ml),e(B,Vl),e(B,sr),e(sr,Gl),e(B,Hl),e(B,Mt),e(Mt,Xl),e(Mt,bo),e(bo,Yl),e(Mt,Jl),e(B,Kl),e(B,Vt),e(Vt,Ql),e(Vt,ar),e(ar,Zl),e(Vt,ec),e(B,tc),e(B,Gt),e(Gt,ir),e(ir,oc),e(Gt,nc),e(Gt,Ht),e(Ht,rc),e(Ht,dr),e(dr,sc),e(Ht,ac),e(B,ic),e(B,U),e(U,dc),e(U,vo),e(vo,lc),e(U,cc),e(U,lr),e(lr,pc),e(U,mc),e(U,cr),e(cr,hc),e(U,uc),e(U,pr),e(pr,fc),e(U,_c),e(B,gc),v(Ze,B,null),e(A,kc),e(A,O),v(Xt,O,null),e(O,bc),e(O,mr),e(mr,vc),e(O,Tc),e(O,hr),e(hr,yc),e(O,zc),e(O,Yt),e(Yt,wc),e(Yt,To),e(To,xc),e(Yt,$c),e(O,Pc),v(et,O,null),e(A,Ec),e(A,pe),v(Jt,pe,null),e(pe,qc),e(pe,we),e(we,Bc),e(we,ur),e(ur,Lc),e(we,Dc),e(we,fr),e(fr,Ic),e(we,Nc),e(pe,Ac),e(pe,_r),e(_r,Fc),P(i,Sr,w),P(i,xe,w),e(xe,tt),e(tt,gr),v(Kt,gr,null),e(xe,Sc),e(xe,kr),e(kr,Cc),P(i,Cr,w),P(i,$e,w),v(Qt,$e,null),e($e,Oc),e($e,Pe),e(Pe,Wc),e(Pe,br),e(br,jc),e(Pe,Rc),e(Pe,ot),e(ot,Uc),e(ot,vr),e(vr,Mc),e(ot,Vc),e(Pe,Gc),P(i,Or,w),P(i,Ee,w),v(Zt,Ee,null),e(Ee,Hc),e(Ee,Tr),e(Tr,Xc),P(i,Wr,w),P(i,qe,w),v(eo,qe,null),e(qe,Yc),e(qe,yr),e(yr,Jc),jr=!0},p(i,[w]){const to={};w&2&&(to.$$scope={dirty:w,ctx:i}),Ce.$set(to);const zr={};w&2&&(zr.$$scope={dirty:w,ctx:i}),je.$set(zr);const wr={};w&2&&(wr.$$scope={dirty:w,ctx:i}),Re.$set(wr);const xr={};w&2&&(xr.$$scope={dirty:w,ctx:i}),Ue.$set(xr);const M={};w&2&&(M.$$scope={dirty:w,ctx:i}),Ve.$set(M);const $r={};w&2&&($r.$$scope={dirty:w,ctx:i}),Xe.$set($r);const Pr={};w&2&&(Pr.$$scope={dirty:w,ctx:i}),Ye.$set(Pr);const Er={};w&2&&(Er.$$scope={dirty:w,ctx:i}),Ze.$set(Er);const 
qr={};w&2&&(qr.$$scope={dirty:w,ctx:i}),et.$set(qr)},i(i){jr||(T(c.$$.fragment,i),T(at.$$.fragment,i),T(it.$$.fragment,i),T(dt.$$.fragment,i),T(lt.$$.fragment,i),T(ct.$$.fragment,i),T(pt.$$.fragment,i),T(Ce.$$.fragment,i),T(mt.$$.fragment,i),T(ht.$$.fragment,i),T(ut.$$.fragment,i),T(_t.$$.fragment,i),T(gt.$$.fragment,i),T(bt.$$.fragment,i),T(Tt.$$.fragment,i),T(je.$$.fragment,i),T(yt.$$.fragment,i),T(Re.$$.fragment,i),T(Ue.$$.fragment,i),T(wt.$$.fragment,i),T(xt.$$.fragment,i),T($t.$$.fragment,i),T(Ve.$$.fragment,i),T(Pt.$$.fragment,i),T(Et.$$.fragment,i),T(qt.$$.fragment,i),T(Xe.$$.fragment,i),T(Lt.$$.fragment,i),T(Ye.$$.fragment,i),T(It.$$.fragment,i),T(Ft.$$.fragment,i),T(Ct.$$.fragment,i),T(Wt.$$.fragment,i),T(jt.$$.fragment,i),T(Rt.$$.fragment,i),T(Ut.$$.fragment,i),T(Ze.$$.fragment,i),T(Xt.$$.fragment,i),T(et.$$.fragment,i),T(Jt.$$.fragment,i),T(Kt.$$.fragment,i),T(Qt.$$.fragment,i),T(Zt.$$.fragment,i),T(eo.$$.fragment,i),jr=!0)},o(i){y(c.$$.fragment,i),y(at.$$.fragment,i),y(it.$$.fragment,i),y(dt.$$.fragment,i),y(lt.$$.fragment,i),y(ct.$$.fragment,i),y(pt.$$.fragment,i),y(Ce.$$.fragment,i),y(mt.$$.fragment,i),y(ht.$$.fragment,i),y(ut.$$.fragment,i),y(_t.$$.fragment,i),y(gt.$$.fragment,i),y(bt.$$.fragment,i),y(Tt.$$.fragment,i),y(je.$$.fragment,i),y(yt.$$.fragment,i),y(Re.$$.fragment,i),y(Ue.$$.fragment,i),y(wt.$$.fragment,i),y(xt.$$.fragment,i),y($t.$$.fragment,i),y(Ve.$$.fragment,i),y(Pt.$$.fragment,i),y(Et.$$.fragment,i),y(qt.$$.fragment,i),y(Xe.$$.fragment,i),y(Lt.$$.fragment,i),y(Ye.$$.fragment,i),y(It.$$.fragment,i),y(Ft.$$.fragment,i),y(Ct.$$.fragment,i),y(Wt.$$.fragment,i),y(jt.$$.fragment,i),y(Rt.$$.fragment,i),y(Ut.$$.fragment,i),y(Ze.$$.fragment,i),y(Xt.$$.fragment,i),y(et.$$.fragment,i),y(Jt.$$.fragment,i),y(Kt.$$.fragment,i),y(Qt.$$.fragment,i),y(Zt.$$.fragment,i),y(eo.$$.fragment,i),jr=!1},d(i){t(p),i&&t($),i&&t(f),z(c),i&&t(j),i&&t(D),i&&t(Dr),i&&t(co),i&&t(Ir),i&&t(ke),z(at),i&&t(Nr),i&&t(u),z(it),z(dt),z(lt),z(ct),z(pt),z(Ce),z(mt),z(ht),z(ut),z(_t),z(gt),z(bt),z(Tt),z(je),z(yt),z(Re),z(Ue),z(wt),z(xt),z($t),z(Ve),z(Pt),z(Et),z(qt),z(Xe),z(Lt),z(Ye),z(It),z(Ft),z(Ct),z(Wt),i&&t(Ar),i&&t(ye),z(jt),i&&t(Fr),i&&t(A),z(Rt),z(Ut),z(Ze),z(Xt),z(et),z(Jt),i&&t(Sr),i&&t(xe),z(Kt),i&&t(Cr),i&&t($e),z(Qt),i&&t(Or),i&&t(Ee),z(Zt),i&&t(Wr),i&&t(qe),z(eo)}}}const ch={local:"utilities-for-tokenizers",sections:[{local:"transformers.PreTrainedTokenizerBase",title:"PreTrainedTokenizerBase"},{local:"transformers.SpecialTokensMixin",title:"SpecialTokensMixin"},{local:"transformers.tokenization_utils_base.TruncationStrategy",title:"Enums and namedtuples"}],title:"Utilities for Tokenizers"};function ph(q){return Zm(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class kh extends Ym{constructor(p){super();Jm(this,p,ph,lh,Km,{})}}export{kh as default,ch as metadata};
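The compiled page above covers the tokenizer utilities, including the CharSpan ("character span in the original string") and TokenSpan ("token span in an encoded string") helpers. As a rough, doctest-style sketch of how those spans are typically obtained, assuming a fast (Rust-backed) tokenizer and using bert-base-uncased purely as an illustrative checkpoint:

>>> from transformers import AutoTokenizer

>>> # Assumption: a fast (Rust-backed) tokenizer; slow tokenizers do not expose these span helpers.
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> encoding = tokenizer("hello world", truncation=True)

>>> encoding.token_to_chars(1)  # CharSpan(start=0, end=5): characters covered by token 1 ("hello")
>>> encoding.word_to_tokens(0)  # TokenSpan(start=1, end=2): tokens produced by word 0
>>> encoding.char_to_token(6)   # 2: index of the token containing character 6 ("w")

CharSpan and TokenSpan are plain named tuples, so their start and end fields can be unpacked directly.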
42
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/data_collator.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;data-collator&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.default_data_collator&quot;,&quot;title&quot;:&quot;Default data collator&quot;},{&quot;local&quot;:&quot;transformers.DefaultDataCollator&quot;,&quot;title&quot;:&quot;DefaultDataCollator&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorWithPadding&quot;,&quot;title&quot;:&quot;DataCollatorWithPadding&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForTokenClassification&quot;,&quot;title&quot;:&quot;DataCollatorForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForSeq2Seq&quot;,&quot;title&quot;:&quot;DataCollatorForSeq2Seq&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForLanguageModeling&quot;,&quot;title&quot;:&quot;DataCollatorForLanguageModeling&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForWholeWordMask&quot;,&quot;title&quot;:&quot;DataCollatorForWholeWordMask&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForPermutationLanguageModeling&quot;,&quot;title&quot;:&quot;DataCollatorForPermutationLanguageModeling&quot;}],&quot;title&quot;:&quot;Data Collator&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/data_collator.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="data-collator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#data-collator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data Collator </span></h1> <p>Data collators are objects that will form a batch by using a list of dataset elements as input. 
These elements are of the same type as the elements of <code>train_dataset</code> or <code>eval_dataset</code>.</p> <p>To be able to build batches, data collators may apply some processing (like padding). Some of them (like <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a>) also apply some random data augmentation (like random masking) on the formed batch.</p> <p>Examples of use can be found in the <a href="../examples">example scripts</a> or <a href="../notebooks">example notebooks</a>.</p> <h2 class="relative group"><a id="transformers.default_data_collator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.default_data_collator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Default data collator </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.default_data_collator"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.default_data_collator</span></h4><!-- HTML_TAG_END --> <a id="transformers.default_data_collator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.default_data_collator"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L49" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">features<span class="opacity-60">: typing.List[InputDataClass]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60"> = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:</p> <ul><li><code>label</code>: handles a single value (int or float) per object</li> <li><code>label_ids</code>: handles a list of values per object</li></ul> <p>Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. 
See glue and ner for example of how it’s useful.</p></div> <h2 class="relative group"><a id="transformers.DefaultDataCollator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DefaultDataCollator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DefaultDataCollator </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DefaultDataCollator"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DefaultDataCollator</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DefaultDataCollator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DefaultDataCollator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L75" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DefaultDataCollator.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DefaultDataCollator.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:</p> <ul><li><code>label</code>: handles a single value (int or float) per object</li> <li><code>label_ids</code>: handles a list of values per object</li></ul> <p>Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it’s useful.</p> <p>This is an object (like other data collators) rather than a pure function like default_data_collator. 
This can be helpful if you need to set a return_tensors value at initialization.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorWithPadding" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorWithPadding </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorWithPadding"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorWithPadding</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorWithPadding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorWithPadding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L213" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.padding" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code> (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code>: No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForTokenClassification </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L264" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_pad_token_id<span class="opacity-60">: int = -100</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a 
href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.label_pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.label_pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received, as well as the labels.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForSeq2Seq" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForSeq2Seq </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForSeq2Seq"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path 
class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForSeq2Seq</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForSeq2Seq" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForSeq2Seq"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L514" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_pad_token_id<span class="opacity-60">: int = -100</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.DataCollatorForSeq2Seq.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; The model that is being trained. 
If set and has the <em>prepare_decoder_input_ids_from_labels</em>, use it to prepare the <em>decoder_input_ids</em></p> <p>This is useful when using <em>label_smoothing</em> to avoid calculating loss twice.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.label_pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.label_pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.return_tensors"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received, as well as the labels.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForLanguageModeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForLanguageModeling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForLanguageModeling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L607" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm_probability<span class="opacity-60">: float = 0.15</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf_experimental_compile<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.mlm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.mlm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mlm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use masked language modeling. If set to <code>False</code>, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). 
Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.mlm_probability" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.mlm_probability"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mlm_probability</strong> (<code>float</code>, <em>optional</em>, defaults to 0.15) &#x2014; The probability with which to (randomly) mask tokens in the input, when <code>mlm</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the <code>&quot;special_tokens_mask&quot;</code> key, as returned by a <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or a <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> with the argument <code>return_special_tokens_mask=True</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.numpy_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>numpy_mask_tokens</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.DataCollatorForLanguageModeling.numpy_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L805" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L659" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token_id<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 
12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling.torch_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L748" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div></div> <h2 class="relative group"><a id="transformers.DataCollatorForWholeWordMask" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForWholeWordMask"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
## DataCollatorForWholeWordMask

### class transformers.DataCollatorForWholeWordMask

( tokenizer: PreTrainedTokenizerBase, mlm: bool = True, mlm_probability: float = 0.15, pad_to_multiple_of: typing.Optional[int] = None, tf_experimental_compile: bool = False, return_tensors: str = 'pt' )

Data collator used for language modeling that masks entire words.

- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling

> This collator relies on details of the implementation of subword tokenization by BertTokenizer, specifically that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to DataCollatorForLanguageModeling.
href="#transformers.DataCollatorForWholeWordMask.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1074" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForWholeWordMask.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1032" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForWholeWordMask.torch_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L992" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div></div> <h2 class="relative group"><a id="transformers.DataCollatorForPermutationLanguageModeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForPermutationLanguageModeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForPermutationLanguageModeling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForPermutationLanguageModeling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
## DataCollatorForPermutationLanguageModeling

### class transformers.DataCollatorForPermutationLanguageModeling

( tokenizer: PreTrainedTokenizerBase, plm_probability: float = 0.16666666666666666, max_span_length: int = 5, return_tensors: str = 'pt' )

Data collator used for permutation language modeling.

- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
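A hedged usage sketch (not from the upstream docstring; the XLNet checkpoint, sentences, and the choice of `max_length=32` are arbitrary illustrations). The examples are padded to a fixed even length here because the collator expects even sequence lengths when building its leakage-free permutation mask:

```python
from transformers import XLNetTokenizerFast, DataCollatorForPermutationLanguageModeling

tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
data_collator = DataCollatorForPermutationLanguageModeling(
    tokenizer=tokenizer, plm_probability=1 / 6, max_span_length=5
)

examples = [
    tokenizer("Permutation language modeling is used by XLNet.", padding="max_length", max_length=32),
    tokenizer("Spans of tokens are masked and predicted in a random order.", padding="max_length", max_length=32),
]
batch = data_collator(examples)
# XLNet-style inputs: perm_mask encodes the factorisation order, target_mapping the predicted positions
print(batch.keys())  # input_ids, perm_mask, target_mapping, labels
```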
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>numpy_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1444" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length <code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length - span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1334" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol 
start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length <code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length - span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/data_collator.py#L1235" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length <code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length - span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div></div> <script type="module" data-hydrate="18gae2k"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="18gae2k"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/data_collator.mdx-hf-doc-builder.js") ], params: {} } }); </script>
43
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/callback.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;callbacks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.integrations.CometCallback&quot;,&quot;title&quot;:&quot;Available Callbacks&quot;},{&quot;local&quot;:&quot;transformers.TrainerCallback&quot;,&quot;title&quot;:&quot;TrainerCallback&quot;},{&quot;local&quot;:&quot;transformers.TrainerState&quot;,&quot;title&quot;:&quot;TrainerState&quot;},{&quot;local&quot;:&quot;transformers.TrainerControl&quot;,&quot;title&quot;:&quot;TrainerControl&quot;}],&quot;title&quot;:&quot;Callbacks&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/callback.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="callbacks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#callbacks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Callbacks </span></h1> <p>Callbacks are objects that can customize the behavior of the training loop in the PyTorch <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms…) and take decisions (like early stopping).</p> <p>Callbacks are “read only” pieces of code, apart from the <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a> object they return, they cannot change anything in the training loop. 
For customizations that require changes in the training loop, you should subclass <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> and override the methods you need (see <a href="trainer">trainer</a> for examples).</p> <p>By default a <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will use the following callbacks:</p> <ul><li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.DefaultFlowCallback">DefaultFlowCallback</a> which handles the default behavior for logging, saving and evaluation.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.PrinterCallback">PrinterCallback</a> or <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.ProgressCallback">ProgressCallback</a> to display progress and print the logs (the first one is used if you deactivate tqdm through the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>, otherwise it’s the second one).</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.TensorBoardCallback">TensorBoardCallback</a> if tensorboard is accessible (either through PyTorch &gt;= 1.4 or tensorboardX).</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.WandbCallback">WandbCallback</a> if <a href="https://www.wandb.com/" rel="nofollow">wandb</a> is installed.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.CometCallback">CometCallback</a> if <a href="https://www.comet.ml/site/" rel="nofollow">comet_ml</a> is installed.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.MLflowCallback">MLflowCallback</a> if <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> is installed.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.NeptuneCallback">NeptuneCallback</a> if <a href="https://neptune.ai/" rel="nofollow">neptune</a> is installed.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.AzureMLCallback">AzureMLCallback</a> if <a href="https://pypi.org/project/azureml-sdk/" rel="nofollow">azureml-sdk</a> is installed.</li> <li><a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.integrations.CodeCarbonCallback">CodeCarbonCallback</a> if <a href="https://pypi.org/project/codecarbon/" rel="nofollow">codecarbon</a> is installed.</li></ul> <p>The main class that implements callbacks is <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>. 
It gets the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> used to instantiate the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, can access that Trainer’s internal state via <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState">TrainerState</a>, and can take some actions on the training loop via <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>.</p> <h2 class="relative group"><a id="transformers.integrations.CometCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.CometCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Available Callbacks </span></h2> <p>Here is the list of the available <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> in the library:</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.CometCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">CometCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.CometCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.CometCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L765" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.comet.ml/site/" rel="nofollow">Comet ML</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.CometCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.CometCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.CometCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L776" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optional Comet.ml integration.</p> <p>Environment: COMET_MODE (<code>str</code>, <em>optional</em>): Whether to create an online, offline experiment or disable Comet logging. Can be “OFFLINE”, “ONLINE”, or “DISABLED”. Defaults to “ONLINE”. COMET_PROJECT_NAME (<code>str</code>, <em>optional</em>): Comet project name for experiments COMET_OFFLINE_DIRECTORY (<code>str</code>, <em>optional</em>): Folder to use for saving offline experiments when <code>COMET_MODE</code> is “OFFLINE” COMET_LOG_ASSETS (<code>str</code>, <em>optional</em>): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be “TRUE”, or “FALSE”. 
Defaults to “TRUE”.</p> <p>For a number of configurable items in the environment, see <a href="https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables" rel="nofollow">here</a>.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DefaultFlowCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DefaultFlowCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DefaultFlowCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DefaultFlowCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L415" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that handles the default flow of the training loop for logs, evaluation and checkpoints.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PrinterCallback"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PrinterCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PrinterCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrinterCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L513" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A bare <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that just prints the logs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProgressCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 
2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProgressCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProgressCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProgressCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L465" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that displays the progress of training or evaluation.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.EarlyStoppingCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">EarlyStoppingCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.EarlyStoppingCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.EarlyStoppingCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L524" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping_patience<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping_threshold<span class="opacity-60">: typing.Optional[float] = 0.0</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.EarlyStoppingCallback.early_stopping_patience" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EarlyStoppingCallback.early_stopping_patience"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping_patience</strong> (<code>int</code>) &#x2014; Use with <code>metric_for_best_model</code> to stop training when the specified metric worsens for <code>early_stopping_patience</code> evaluation calls.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.EarlyStoppingCallback.early_stopping_threshold(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EarlyStoppingCallback.early_stopping_threshold(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping_threshold(<code>float</code>,</strong> <em>optional</em>) &#x2014; Use with TrainingArguments <code>metric_for_best_model</code> and <code>early_stopping_patience</code> to denote how much the specified metric must improve to satisfy early stopping conditions. `<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that handles early stopping.</p> <p>This callback depends on <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> argument <em>load_best_model_at_end</em> functionality to set best_metric in <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState">TrainerState</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.TensorBoardCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">TensorBoardCallback</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.integrations.TensorBoardCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.TensorBoardCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L550" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tb_writer<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.TensorBoardCallback.tb_writer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.TensorBoardCallback.tb_writer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tb_writer</strong> (<code>SummaryWriter</code>, <em>optional</em>) &#x2014; The writer to use. 
Will instantiate one if not set.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.WandbCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">WandbCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.WandbCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.WandbCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L639" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.wandb.com/" rel="nofollow">Weight and Biases</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.WandbCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.WandbCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.WandbCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L656" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optional Weights &amp; Biases (<em>wandb</em>) integration.</p> <p>One can subclass and override this method to customize the setup if needed. Find more information <a href="https://docs.wandb.ai/integrations/huggingface" rel="nofollow">here</a>. You can also override the following environment variables:</p> <p>Environment: WANDB_LOG_MODEL (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not to log model as artifact at the end of training. Use along with <em>TrainingArguments.load_best_model_at_end</em> to upload best model. WANDB_WATCH (<code>str</code>, <em>optional</em> defaults to <code>&quot;gradients&quot;</code>): Can be <code>&quot;gradients&quot;</code>, <code>&quot;all&quot;</code> or <code>&quot;false&quot;</code>. Set to <code>&quot;false&quot;</code> to disable gradient logging or <code>&quot;all&quot;</code> to log gradients and parameters. WANDB_PROJECT (<code>str</code>, <em>optional</em>, defaults to <code>&quot;huggingface&quot;</code>): Set this to a custom string to store results in a different project. WANDB_DISABLED (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not to disable wandb entirely. Set <em>WANDB_DISABLED=true</em> to disable.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.MLflowCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">MLflowCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.MLflowCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.MLflowCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
<div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.MLflowCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">MLflowCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.MLflowCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.MLflowCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L865" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.mlflow.org/" rel="nofollow">MLflow</a>. Can be disabled by setting environment variable <code>DISABLE_MLFLOW_INTEGRATION = TRUE</code>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.MLflowCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.MLflowCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.MLflowCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L884" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set up the optional MLflow integration.</p> <p>Environment: HF_MLFLOW_LOG_ARTIFACTS (<code>str</code>, <em>optional</em>): Whether to use the MLflow .log_artifact() facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to <code>True</code> or <em>1</em>, will copy each saved checkpoint from <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s <code>output_dir</code> to the local or remote artifact storage on each save. Using it without a remote storage will just copy the files to your artifact location. MLFLOW_EXPERIMENT_NAME (<code>str</code>, <em>optional</em>): Name of the MLflow experiment under which to launch the run. Defaults to “None”, which will point to the “Default” experiment in MLflow. Otherwise, it is a case-sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. MLFLOW_TAGS (<code>str</code>, <em>optional</em>): A string dump of a dictionary of key/value pairs to be added to the MLflow run as tags. Example: os.environ[‘MLFLOW_TAGS’]=’{“release.candidate”: “RC1”, “release.version”: “2.2.0”}’ MLFLOW_NESTED_RUN (<code>str</code>, <em>optional</em>): Whether to use MLflow nested runs. If set to <code>True</code> or <em>1</em>, will create a nested run inside the current run. MLFLOW_RUN_ID (<code>str</code>, <em>optional</em>): Allows reattaching to an existing run, which can be useful when resuming training from a checkpoint. When the MLFLOW_RUN_ID environment variable is set, start_run attempts to resume a run with the specified run ID and other parameters are ignored. MLFLOW_FLATTEN_PARAMS (<code>str</code>, <em>optional</em>): Whether to flatten the parameters dictionary before logging. Defaults to <code>False</code>.</p></div></div>
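<p>For example, a minimal sketch of configuring these variables before building the <code>Trainer</code> (the experiment name and tag values below are placeholders):</p> <pre><code class="language-python">import os

# Read by MLflowCallback.setup() when the MLflow run is started (illustrative values).
os.environ["MLFLOW_EXPERIMENT_NAME"] = "my-experiment"   # activate (or create) this experiment
os.environ["MLFLOW_FLATTEN_PARAMS"] = "1"                # flatten nested parameters before logging
os.environ["MLFLOW_TAGS"] = '{"release.candidate": "RC1", "release.version": "2.2.0"}'
os.environ["HF_MLFLOW_LOG_ARTIFACTS"] = "1"              # only useful with a remote artifact store

from transformers import TrainingArguments

# With mlflow installed, report_to="mlflow" makes the Trainer add MLflowCallback automatically.
training_args = TrainingArguments(output_dir="outputs", report_to=["mlflow"])</code></pre>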
<div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.AzureMLCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">AzureMLCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.AzureMLCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.AzureMLCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">azureml_run<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://pypi.org/project/azureml-sdk/" rel="nofollow">AzureML</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3
px-2.5" id="transformers.integrations.CodeCarbonCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">CodeCarbonCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.CodeCarbonCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.CodeCarbonCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L1273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that tracks the CO2 emission of training.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.NeptuneCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">NeptuneCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.NeptuneCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.NeptuneCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/integrations.py#L1012" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">api_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">project<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">base_namespace<span class="opacity-60">: str = &#39;finetuning&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run<span class="opacity-60">: typing.Optional[ForwardRef(&#39;Run&#39;)] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_parameters<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_checkpoints<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**neptune_run_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.api_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.api_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>api_token</strong> (<code>str</code>, optional) &#x2014; Neptune API token obtained upon registration. You can leave this argument out if you have saved your token to the <code>NEPTUNE_API_TOKEN</code> environment variable (strongly recommended). See full setup instructions in the <a href="https://docs.neptune.ai/getting-started/installation" rel="nofollow">docs</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.project" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.project"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>project</strong> (<code>str</code>, optional) &#x2014; Name of an existing Neptune project, in the form: &#x201C;workspace-name/project-name&#x201D;. 
You can find and copy the name from the project Settings -&gt; Properties in Neptune. If None (default), the value of the <code>NEPTUNE_PROJECT</code> environment variable will be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code>, optional) &#x2014; Custom name for the run.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.base_namespace" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.base_namespace"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>base_namespace</strong> (<code>str</code>, optional, defaults to &#x201C;finetuning&#x201D;) &#x2014; In the Neptune run, the root namespace that will contain all of the logged metadata.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.log_parameters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.log_parameters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_parameters</strong> (<code>bool</code>, optional, defaults to True) &#x2014; If True, logs all Trainer arguments and model parameters provided by the Trainer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.log_checkpoints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.log_checkpoints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_checkpoints</strong> (<code>str</code>, optional, defaults to None) &#x2014; If &#x201C;same&#x201D;, uploads checkpoints whenever they are saved by the Trainer. If &#x201C;last&#x201D;, uploads only the most recently saved checkpoint. If &#x201C;best&#x201D;, uploads the best checkpoint (among the ones saved by the Trainer). 
If None, does not upload checkpoints.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.run" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.run"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>run</strong> (<code>Run</code>, optional) &#x2014; Pass a Neptune run object if you want to continue logging to an existing run. Read more about resuming runs in the <a href="https://docs.neptune.ai/how-to-guides/neptune-api/resume-run" rel="nofollow">docs</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.NeptuneCallback.*neptune_run_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.NeptuneCallback.*neptune_run_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START -->*<strong>*neptune_run_kwargs</strong> (optional) &#x2014; Additional keyword arguments to be passed directly to the <a href="https://docs.neptune.ai/api-reference/neptune#.init_run" rel="nofollow">neptune.init_run()</a> function when a new run is created.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>TrainerCallback that sends the logs to <a href="https://neptune.ai" rel="nofollow">Neptune</a>.</p></div> <h2 class="relative group"><a id="transformers.TrainerCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainerCallback </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details 
"> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>) &#x2014; The training arguments used to instantiate the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>state</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerState">TrainerState</a>) &#x2014; The current state of the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.control" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.control"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>control</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>) &#x2014; The object that is returned to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> and can be used to make some decisions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>) &#x2014; The model being trained.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer used for the training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.lr_scheduler" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.lr_scheduler"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler</strong> (<code>torch.optim.lr_scheduler.LambdaLR</code>) &#x2014; The scheduler used for setting the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.train_dataloader" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.train_dataloader"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>train_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.eval_dataloader" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.eval_dataloader"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for evaluation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics computed by the last evaluation phase.</p> <p>Those are only accessible in the event
<code>on_evaluate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.logs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.logs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logs</strong>  (<code>Dict[str, float]</code>) &#x2014; The values to log.</p> <p>Those are only accessible in the event <code>on_log</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class for objects that will inspect the state of the training loop at some events and make decisions. At each of those events the following arguments are available:</p> <p>The <code>control</code> object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.</p> <p>The arguments <code>args</code>, <code>state</code> and <code>control</code> are positional for all events; all the others are grouped in <code>kwargs</code>. You can unpack the ones you need in the signature of the event handler.
As an example, see the code of the simple <code>~transformer.PrinterCallback</code>.</p> <div class="relative group rounded-md"><a id="transformers.TrainerCallback.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">PrinterCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_log</span>(<span class="hljs-params">self, args, state, control, logs=<span class="hljs-literal">None</span>, **kwargs</span>): _ = logs.pop(<span class="hljs-string">&quot;total_flos&quot;</span>, <span class="hljs-literal">None</span>) <span class="hljs-keyword">if</span> state.is_local_process_zero: <span class="hljs-built_in">print</span>(logs)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_epoch_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" 
class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_epoch_begin</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_epoch_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_epoch_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L227" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of an epoch.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_epoch_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_epoch_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_epoch_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_epoch_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of an epoch.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_evaluate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_evaluate</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_evaluate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_evaluate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L259" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after an evaluation phase.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_init_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_init_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_init_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_init_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L209" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of the initialization of the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_log"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_log</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_log" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_log"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L277" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after logging the last logs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_predict</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L265" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after a successful prediction.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_prediction_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_prediction_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_prediction_step" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_prediction_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L283" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after a prediction step.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 
7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_save</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L271" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after a checkpoint save.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_step_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 
11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_step_begin</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_step_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_step_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L239" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of a training step. 
If using gradient accumulation, one training step might take several inputs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_step_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_step_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_step_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_step_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_substep_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_substep_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_substep_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_substep_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L246" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of an substep during gradient accumulation.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_train_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_train_begin</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_train_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_train_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L215" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of training.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_train_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_train_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_train_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_train_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 
1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L221" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of training.</p></div></div> <p>Here is an example of how to register a custom callback with the PyTorch <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-string">&quot;A callback that prints a message at the beginning of training&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_train_begin</span>(<span class="hljs-params">self, args, state, control, **kwargs</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Starting training&quot;</span>) trainer = Trainer( model, args, train_dataset=train_dataset, 
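<p>The callback above only prints a message. As a minimal sketch of how the event methods listed above interact with <code>TrainerControl</code>, the hypothetical callback below asks the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> to stop from <code>on_evaluate</code> once the evaluation loss stops improving; the metric key <code>eval_loss</code> and the patience value are illustrative assumptions, not part of the callback API:</p>
<div class="code-block relative"><pre>from transformers import TrainerCallback


class StopOnPlateauCallback(TrainerCallback):
    "Illustrative sketch: stop training once eval_loss has not improved for `patience` evaluations"

    def __init__(self, patience=3):
        self.patience = patience
        self.best_loss = None
        self.bad_evals = 0

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        # The Trainer passes the evaluation metrics as the `metrics` keyword argument.
        eval_loss = (metrics or {}).get("eval_loss")
        if eval_loss is None:
            return
        if self.best_loss is not None and eval_loss >= self.best_loss:
            self.bad_evals += 1
        else:
            self.best_loss = eval_loss
            self.bad_evals = 0
        if self.bad_evals >= self.patience:
            # Flipping this flag on the TrainerControl object requests an early stop.
            control.should_training_stop = True
        return control</pre></div>
<p>It is registered in the same way as <code>MyCallback</code>, for example with <code>callbacks=[StopOnPlateauCallback(patience=2)]</code>.</p>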
<p>Another way to register a callback is to call <code>trainer.add_callback()</code> as follows:</p>
<div class="code-block relative"><pre>trainer = Trainer(...)
trainer.add_callback(MyCallback)

# Alternatively, we can pass an instance of the callback class
trainer.add_callback(MyCallback())</pre></div>
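<p>Every event also receives the current <code>TrainerState</code> (documented below), which a callback can read to know where training stands. Below is a minimal sketch that reports progress from the state; it assumes the default logging setup, so that a <code>loss</code> entry appears in <code>state.log_history</code> during training:</p>
<div class="code-block relative"><pre>from transformers import TrainerCallback


class ProgressReportCallback(TrainerCallback):
    "Illustrative sketch: print a short progress line every time the Trainer logs"

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only report from the main process to avoid duplicate lines in distributed training.
        if not state.is_world_process_zero:
            return
        # state.log_history accumulates every logged dict since the beginning of training.
        last_loss = next((h["loss"] for h in reversed(state.log_history) if "loss" in h), None)
        print(f"step {state.global_step}/{state.max_steps} (epoch {state.epoch}) - last logged loss: {last_loss}")</pre></div>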
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerState</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerState" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L35" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">epoch<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_step<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">total_flos<span class="opacity-60">: float = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_history<span class="opacity-60">: typing.List[typing.Dict[str, float]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">best_metric<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">best_model_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_local_process_zero<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_world_process_zero<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_hyper_param_search<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial_name<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial_params<span class="opacity-60">: typing.Dict[str, typing.Union[str, float, int, bool]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>epoch</strong> (<code>float</code>, <em>optional</em>) &#x2014; Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.global_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.global_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_step</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; During training, represents the number of update steps completed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of update steps to do during the current training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.total_flos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.total_flos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>total_flos</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TrainerState.log_history" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.log_history"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_history</strong> (<code>List[Dict[str, float]]</code>, <em>optional</em>) &#x2014; The list of logs done since the beginning of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.best_metric" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.best_metric"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>best_metric</strong> (<code>float</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the best metric encountered so far.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.best_model_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.best_model_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>best_model_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the name of the checkpoint for the best model encountered so far.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.is_local_process_zero" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_local_process_zero"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_local_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.is_world_process_zero" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_world_process_zero"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_world_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 
items-start"><a id="transformers.TrainerState.is_hyper_param_search" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_hyper_param_search"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_hyper_param_search</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class containing the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> inner state that will be saved along the model and optimizer when checkpointing and passed to the <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>In all this class, one step is to be understood as one update step. 
When using gradient accumulation, one update step may require several forward and backward passes: if you use <code>gradient_accumulation_steps=n</code>, then one update step requires going through <em>n</em> batches.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerState.load_from_json"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>load_from_json</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerState.load_from_json" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState.load_from_json"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L101" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_path<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Create an instance from the content of <code>json_path</code>.</p></div> <div 
class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerState.save_to_json"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_to_json</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerState.save_to_json" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState.save_to_json"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_path<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Save the content of this instance in JSON format inside <code>json_path</code>.</p></div></div> <h2 class="relative group"><a id="transformers.TrainerControl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.TrainerControl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainerControl </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerControl"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerControl</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerControl" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerControl"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_training_stop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_epoch_stop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_save<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_evaluate<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_log<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_training_stop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_training_stop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_training_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the training should be interrupted.</p> <p>If <code>True</code>, this variable will not be set back to <code>False</code>. 
The training will just stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_epoch_stop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_epoch_stop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_epoch_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the current epoch should be interrupted.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next epoch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_save" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_save"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_save</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be saved at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_evaluate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_evaluate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_evaluate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be evaluated at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_log" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_log"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_log</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the logs should be reported at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class that handles the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> control flow. This class is used by the <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> to activate some switches in the training loop.</p></div> <script type="module" data-hydrate="tvd650"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="tvd650"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/callback.mdx-hf-doc-builder.js") ], params: {} } }); </script>
44
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/feature_extractor.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;feature-extractor&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.FeatureExtractionMixin&quot;,&quot;title&quot;:&quot;FeatureExtractionMixin&quot;},{&quot;local&quot;:&quot;transformers.SequenceFeatureExtractor&quot;,&quot;title&quot;:&quot;SequenceFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.BatchFeature&quot;,&quot;title&quot;:&quot;BatchFeature&quot;},{&quot;local&quot;:&quot;transformers.ImageFeatureExtractionMixin&quot;,&quot;title&quot;:&quot;ImageFeatureExtractionMixin&quot;}],&quot;title&quot;:&quot;Feature Extractor&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/feature_extractor.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="feature-extractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#feature-extractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Feature Extractor </span></h1> <p>A feature extractor is in charge of preparing input features for audio or vision models. 
This includes feature extraction from sequences, <em>e.g.</em>, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images <em>e.g.</em> cropping image image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow tensors.</p> <h2 class="relative group"><a id="transformers.FeatureExtractionMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeatureExtractionMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FeatureExtractionMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L198" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionMixin.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionMixin.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionMixin.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L222" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the feature extractor files and 
override the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.use_auth_token"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
- **return_unused_kwargs** (`bool`, *optional*, defaults to `False`): If `False`, this function returns just the final feature extractor object. If `True`, it returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes, i.e. the part of `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
- **kwargs** (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is controlled by the `return_unused_kwargs` keyword parameter.

Instantiate a type of `FeatureExtractionMixin` from a feature extractor, *e.g.* a derived class of `SequenceFeatureExtractor`.

> Passing `use_auth_token=True` is required when you want to use a private model.

Examples:

```python
# We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor*,
# so let's show the examples on a derived class: *Wav2Vec2FeatureExtractor*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "facebook/wav2vec2-base-960h"
)  # Download feature_extraction_config from huggingface.co and cache.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "./test/saved_model/"
)  # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
)
assert feature_extractor.return_attention_mask is False
feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
    "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
)
assert feature_extractor.return_attention_mask is False
assert unused_kwargs == {"foo": False}
```
href="#transformers.FeatureExtractionMixin.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/feature_extraction_utils.py#L306" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file will be saved (will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base 
!pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a feature_extractor object to the directory <code>save_directory</code>, so that it can be re-loaded using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> class method.</p></div></div> <h2 class="relative group"><a id="transformers.SequenceFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SequenceFeatureExtractor </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SequenceFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg 
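As a minimal sketch of the save/reload round trip (the local directory path below is just an example, not one used in the original docs):

```python
from transformers import Wav2Vec2FeatureExtractor

# Load, save, then reload the same configuration from disk.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor.save_pretrained("./my-feature-extractor/")  # writes preprocessor_config.json
reloaded = Wav2Vec2FeatureExtractor.from_pretrained("./my-feature-extractor/")
```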
## SequenceFeatureExtractor

### class transformers.SequenceFeatureExtractor

`(feature_size: int, sampling_rate: int, padding_value: float, **kwargs)`

Parameters:

- **feature_size** (`int`): The feature dimension of the extracted features.
- **sampling_rate** (`int`): The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
- **padding_value** (`float`): The value that is used to fill the padding values / vectors.

This is a general feature extraction class for speech recognition.
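As an illustration (not part of the original docstring), these constructor arguments surface as attributes on a derived class such as `Wav2Vec2FeatureExtractor`:

```python
from transformers import Wav2Vec2FeatureExtractor

# The values below match this extractor's defaults; they are spelled out for clarity.
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
print(feature_extractor.feature_size, feature_extractor.sampling_rate, feature_extractor.padding_value)
```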
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">processed_features<span class="opacity-60">: typing.Union[transformers.feature_extraction_utils.BatchFeature, typing.List[transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, typing.List[transformers.feature_extraction_utils.BatchFeature]], typing.List[typing.Dict[str, transformers.feature_extraction_utils.BatchFeature]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.processed_features" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.processed_features"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>processed_features</strong> (<a 
href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, list of <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <code>Dict[str, List[float]]</code>, <code>Dict[str, List[List[float]]</code> or <code>List[Dict[str, List[float]]]</code>) &#x2014; Processed inputs. Can represent one input (<a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a> or <code>Dict[str, List[float]]</code>) or a batch of input values / vectors (list of <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <em>Dict[str, List[List[float]]]</em> or <em>List[Dict[str, List[float]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[float]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to cut input sequences longer than <code>max_length</code> to <code>max_length</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p> </blockquote><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the max sequence length in the batch.</p> <p>Padding side (left/right) padding values are defined at the feature extractor level (with <code>self.padding_side</code>, <code>self.padding_value</code>)</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If the <code>processed_features</code> passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with <code>return_tensors</code>. 
In the case of PyTorch tensors, you will lose the specific device of your tensors however.</p></div></div></div> <h2 class="relative group"><a id="transformers.BatchFeature" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BatchFeature </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchFeature"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BatchFeature</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BatchFeature" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchFeature"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
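To make the padding behavior concrete, here is a minimal sketch using `Wav2Vec2FeatureExtractor` as the concrete subclass; the raw values are made up for illustration:

```python
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)

# Two "audio" sequences of different lengths (dummy values).
processed_features = {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}

batch = feature_extractor.pad(
    processed_features,
    padding="longest",           # pad to the longest sequence in the batch
    return_attention_mask=True,  # 1 for real values, 0 for padding
    return_tensors="np",
)
print(batch["input_values"].shape)  # (2, 3)
print(batch["attention_mask"])      # [[1 1 1], [1 1 0]]
```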
## BatchFeature

### class transformers.BatchFeature

`(data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None)`

Parameters:

- **data** (`dict`): Dictionary of lists/arrays/tensors returned by the `__call__`/`pad` methods ('input_values', 'attention_mask', etc.).
- **tensor_type** (`Union[None, str, TensorType]`, *optional*): You can give a `tensor_type` here to convert the lists of integers into PyTorch/TensorFlow/Numpy tensors at initialization.

Holds the output of the `pad()` and feature extractor specific `__call__` methods.

This class is derived from a python dictionary and can be used as a dictionary.
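As a quick, illustrative sketch (not from the original docstring), a `BatchFeature` can also be built directly from a data dictionary and indexed like a plain dict:

```python
from transformers import BatchFeature

# Wrap pre-computed features; tensor_type="np" converts the lists at construction time.
batch = BatchFeature(data={"input_values": [[0.1, 0.2], [0.3, 0.4]]}, tensor_type="np")
print(batch["input_values"].shape)  # dict-style access -> (2, 2)
print(list(batch.keys()))           # ['input_values']
```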
#### convert_to_tensors

`convert_to_tensors(tensor_type: Union[str, TensorType, None] = None)`

Parameters:

- **tensor_type** (`str` or `TensorType`, *optional*): The type of tensors to use. If `str`, should be one of the values of the enum `TensorType`. If `None`, no modification is done.

Convert the inner content to tensors.
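For example (an illustrative sketch, assuming PyTorch is installed), the in-place conversion looks like this:

```python
from transformers import BatchFeature

batch = BatchFeature(data={"input_values": [[0.1, 0.2], [0.3, 0.4]]})
batch = batch.convert_to_tensors("pt")  # lists become torch.Tensor objects; the same instance is returned
print(type(batch["input_values"]))      # <class 'torch.Tensor'>
```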
#### to

`to(device: Union[str, torch.device]) → BatchFeature`

Parameters:

- **device** (`str` or `torch.device`): The device to put the tensors on.

Returns: `BatchFeature` (the same instance after modification).

Send all values to device by calling `v.to(device)` (PyTorch only).
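A minimal sketch of moving a batch onto an accelerator (assuming a CUDA device is available):

```python
import torch
from transformers import BatchFeature

batch = BatchFeature(data={"input_values": [[0.1, 0.2], [0.3, 0.4]]}, tensor_type="pt")
if torch.cuda.is_available():
    batch = batch.to("cuda")  # every tensor in the batch is moved via v.to(device)
print(batch["input_values"].device)
```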
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageFeatureExtractionMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L78" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Mixin that contain utilities for preparing image features.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.center_crop"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 
<h4>center_crop</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L304">&lt; source &gt;</a></p>
<p>( image, size ) → new_image</p>
<p>Parameters:</p>
<ul>
<li><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code> of shape (n_channels, height, width) or (height, width, n_channels)) &#x2014; The image to crop.</li>
<li><strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to which to crop the image.</li>
</ul>
<p>Returns: new_image &#x2014; A center cropped <code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code> of shape (n_channels, height, width).</p>
<p>Crops <code>image</code> to the given size using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result has the size asked).</p>
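A short sketch of center_crop on a channel-first NumPy array; the input shape is an arbitrary illustrative choice.

```python
import numpy as np
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# Channel-first array standing in for a decoded image.
image = np.random.rand(3, 300, 400)

cropped = processor.center_crop(image, size=224)
print(cropped.shape)  # (3, 224, 224)
```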
<h4>convert_rgb</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L120">&lt; source &gt;</a></p>
<p>( image )</p>
<p>Parameters:</p>
<ul>
<li><strong>image</strong> (<code>PIL.Image.Image</code>) &#x2014; The image to convert.</li>
</ul>
<p>Converts <code>PIL.Image.Image</code> to RGB format.</p>
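A quick sketch of convert_rgb, assuming Pillow is installed; the synthetic grayscale image is only a stand-in.

```python
from PIL import Image
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# A single-channel ("L" mode) image used as a stand-in for a grayscale input.
gray = Image.new("L", (64, 64), color=128)

rgb = processor.convert_rgb(gray)
print(rgb.mode)  # "RGB"
```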
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.convert_rgb.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.convert_rgb.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code>) &#x2014; The image to convert.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Converts <code>PIL.Image.Image</code> to RGB format.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.expand_dims"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>expand_dims</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.expand_dims" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.expand_dims"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
<h4>flip_channel_order</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L379">&lt; source &gt;</a></p>
<p>( image )</p>
<p>Parameters:</p>
<ul>
<li><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image whose color channels to flip. If <code>np.ndarray</code> or <code>torch.Tensor</code>, the channel dimension should be first.</li>
</ul>
<p>Flips the channel order of <code>image</code> from RGB to BGR, or vice versa. Note that this will trigger a conversion of <code>image</code> to a NumPy array if it's a PIL Image.</p>
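A sketch of flip_channel_order on a channel-first array; the final check reflects the expected RGB-to-BGR swap under that assumption.

```python
import numpy as np
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# Channel-first RGB array; after flipping, channel 0 should hold the former blue channel.
rgb = np.random.rand(3, 32, 32)

bgr = processor.flip_channel_order(rgb)
print(np.allclose(bgr[0], rgb[2]))  # True
```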
items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L193" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mean<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">std<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rescale<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.normalize.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.normalize.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to normalize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.normalize.mean" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.normalize.mean"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
<p>Parameters:</p>
<ul>
<li><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to normalize.</li>
<li><strong>mean</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The mean (per channel) to use for normalization.</li>
<li><strong>std</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The standard deviation (per channel) to use for normalization.</li>
<li><strong>rescale</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will happen automatically.</li>
</ul>
<p>Normalizes <code>image</code> with <code>mean</code> and <code>std</code>. Note that this will trigger a conversion of <code>image</code> to a NumPy array if it's a PIL Image.</p>
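A sketch of normalize on a channel-first float array already scaled to [0, 1]; the ImageNet-style mean and std values are illustrative, not defaults of the method.

```python
import numpy as np
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# Channel-first float array in [0, 1]; a real input would come from to_numpy_array.
image = np.random.rand(3, 224, 224).astype(np.float32)

normalized = processor.normalize(
    image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
print(normalized.shape)  # (3, 224, 224)
```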
docstring-details "> </div></div> <p>Rescale a numpy image by scale amount</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.resize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>resize</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.resize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.resize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L239" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">resample<span class="opacity-60"> = &lt;Resampling.BILINEAR: 2&gt;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">default_to_square<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_size<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>image</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) 
<li><strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to use for resizing the image. If <code>size</code> is a sequence like (h, w), the output size will be matched to it. If <code>size</code> is an int and <code>default_to_square</code> is <code>True</code>, the image will be resized to (size, size). If <code>size</code> is an int and <code>default_to_square</code> is <code>False</code>, the smaller edge of the image will be matched to this number, i.e., if height &gt; width, the image will be rescaled to (size * height / width, size).</li>
<li><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; The filter to use for resampling.</li>
<li><strong>default_to_square</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; How to convert <code>size</code> when it is a single int. If set to <code>True</code>, <code>size</code> will be converted to a square (<code>size</code>, <code>size</code>). If set to <code>False</code>, will replicate <a href="https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize" rel="nofollow"><code>torchvision.transforms.Resize</code></a> with support for resizing only the smallest edge and providing an optional <code>max_size</code>.</li>
<li><strong>max_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than <code>max_size</code> after being resized according to <code>size</code>, then the image is resized again so that the longer edge is equal to <code>max_size</code>. As a result, <code>size</code> might be overruled, i.e. the smaller edge may be shorter than <code>size</code>. Only used if <code>default_to_square</code> is <code>False</code>.</li>
</ul>
<p>Returns: image &#x2014; A resized <code>PIL.Image.Image</code>.</p>
<p>Resizes <code>image</code>. Enforces conversion of input to PIL.Image.</p>
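A sketch of resize with default_to_square=False, which matches the smaller edge to the requested size; the input dimensions are illustrative.

```python
import numpy as np
from PIL import Image
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# Synthetic 640x480 RGB image (width x height in PIL terms).
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

# Match the smaller edge to 256 while keeping the aspect ratio.
resized = processor.resize(image, size=256, default_to_square=False)
print(resized.size)  # (341, 256); PIL reports size as (width, height)
```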
class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">expand<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">center<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">translate<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fillcolor<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>image</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.rotate.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.rotate.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to rotate. If <code>np.ndarray</code> or <code>torch.Tensor</code>, will be converted to <code>PIL.Image.Image</code> before rotating.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.ImageFeatureExtractionMixin.rotate.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>image</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A rotated <code>PIL.Image.Image</code>.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns a rotated copy of <code>image</code>. 
<p>Returns a rotated copy of <code>image</code>. This method returns a copy of <code>image</code>, rotated the given number of degrees counter clockwise around its centre.</p>
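A sketch of rotate, assuming Pillow is installed; the angle and expand values are illustrative.

```python
import numpy as np
from PIL import Image
from transformers.image_utils import ImageFeatureExtractionMixin

processor = ImageFeatureExtractionMixin()

# Synthetic 128x64 RGB image (width x height in PIL terms).
image = Image.fromarray(np.random.randint(0, 256, (64, 128, 3), dtype=np.uint8))

# Rotate 90 degrees counter-clockwise; expand=1 grows the canvas so nothing is cut off.
rotated = processor.rotate(image, angle=90, expand=1)
print(rotated.size)  # (64, 128)
```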
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">channel_first<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to a NumPy array.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.rescale" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.rescale"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). 
Will default to <code>True</code> if the image is a PIL Image or an array/tensor of integers, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.channel_first" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.channel_first"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>channel_first</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to permute the dimensions of the image to put the channel dimension first.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Converts <code>image</code> to a numpy array. Optionally rescales it and puts the channel dimension as the first dimension.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.to_pil_image"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_pil_image</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.to_pil_image" class="header-link invisible 
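<p>A short sketch of the conversion on a blank test image (shapes and dtypes shown as comments are what the defaults described above should produce):</p>
<pre><code class="language-python">from PIL import Image
from transformers.image_utils import ImageFeatureExtractionMixin

mixin = ImageFeatureExtractionMixin()
image = Image.new("RGB", (64, 48))  # blank 64x48 RGB test image

array = mixin.to_numpy_array(image)  # rescaled to floats in [0, 1], channel first
print(array.shape)  # (3, 48, 64)

array_hwc = mixin.to_numpy_array(image, channel_first=False)
print(array_hwc.shape)  # (48, 64, 3)
</code></pre>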
<h4>to_pil_image</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/image_utils.py#L90">&lt; source &gt;</a></p>
<p>( image, rescale = None )</p>
<p><strong>Parameters</strong></p>
<ul>
<li><strong>image</strong> (<code>PIL.Image.Image</code> or <code>numpy.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to the PIL Image format.</li>
<li><strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to <code>True</code> if the image type is a floating type, <code>False</code> otherwise.</li>
</ul>
<p>Converts <code>image</code> to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed.</p>
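<p>A round-trip sketch going back from a channel-first float array to a PIL Image (the random values are only illustrative; the rescaling and axis handling follow the defaults described above):</p>
<pre><code class="language-python">import numpy as np
from transformers.image_utils import ImageFeatureExtractionMixin

mixin = ImageFeatureExtractionMixin()

# Float array in [0, 1] with the channel dimension first (C, H, W)
array = np.random.rand(3, 48, 64).astype(np.float32)

pil_image = mixin.to_pil_image(array)  # values rescaled to 0-255, channels moved last
print(pil_image.size)  # (64, 48), i.e. width x height
</code></pre>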
45
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/text_generation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;generation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.generation_utils.GenerationMixin&quot;,&quot;title&quot;:&quot;GenerationMixin&quot;},{&quot;local&quot;:&quot;transformers.generation_tf_utils.TFGenerationMixin&quot;,&quot;title&quot;:&quot;TFGenerationMixin&quot;},{&quot;local&quot;:&quot;transformers.generation_flax_utils.FlaxGenerationMixin&quot;,&quot;title&quot;:&quot;FlaxGenerationMixin&quot;}],&quot;title&quot;:&quot;Generation&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/text_generation.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="generation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#generation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Generation </span></h1> <p>Each framework has a generate method for auto-regressive text generation implemented in their respective <code>GenerationMixin</code> class:</p> <ul><li>PyTorch <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> is implemented in <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin">GenerationMixin</a>.</li> <li>TensorFlow <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate">generate()</a> is implemented in <a 
href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin">TFGenerationMixin</a>.</li> <li>Flax/JAX <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate">generate()</a> is implemented in <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin">FlaxGenerationMixin</a>.</li></ul> <h2 class="relative group"><a id="transformers.generation_utils.GenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GenerationMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L389">&lt; source &gt;</a></p>
<p>A class containing all functions for auto-regressive text generation, to be used as a mixin in <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>.</p>
<p>The class exposes <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, which can be used for:</p>
<ul>
<li><em>greedy decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li>
<li><em>multinomial sampling</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li>
<li><em>beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li>
<li><em>beam-search multinomial sampling</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a> if <code>num_beams&gt;1</code> and <code>do_sample=True</code>.</li>
<li><em>diverse beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, if <code>num_beams&gt;1</code> and <code>num_beam_groups&gt;1</code>.</li>
<li><em>constrained beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>, if <code>constraints!=None</code> or <code>force_words_ids!=None</code>.</li>
</ul>
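<p>A short sketch of how the <code>num_beams</code> / <code>do_sample</code> flags select one of the strategies above; the checkpoint name is only illustrative, and any causal language model works the same way:</p>
<pre><code class="language-python">from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Hello, my dog is", return_tensors="pt")

# greedy decoding (num_beams=1, do_sample=False)
greedy = model.generate(**inputs, max_new_tokens=20)

# multinomial sampling (num_beams=1, do_sample=True)
sampled = model.generate(**inputs, do_sample=True, top_k=50, max_new_tokens=20)

# beam-search decoding (num_beams greater than 1, do_sample=False)
beams = model.generate(**inputs, num_beams=4, max_new_tokens=20)

print(tokenizer.batch_decode(greedy, skip_special_tokens=True))
</code></pre>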
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.generate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L914" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">typical_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repetition_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60">: typing.Optional[typing.Iterable[int]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">force_words_ids<span class="opacity-60">: typing.Union[typing.Iterable[int], typing.Iterable[typing.Iterable[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_return_sequences<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_time<span 
class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_new_tokens<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">diversity_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prefix_allowed_tokens_fn<span class="opacity-60">: typing.Union[typing.Callable[[int, torch.Tensor], typing.List[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">renormalize_logits<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constraints<span class="opacity-60">: typing.Optional[typing.List[transformers.generation_beam_constraints.Constraint]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">forced_bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_invalid_values<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exponential_decay_length_penalty<span class="opacity-60">: typing.Union[typing.Tuple[typing.Union[int, float]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">suppress_tokens<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">begin_suppress_tokens<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_decoder_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
<ul>
<li><strong>inputs</strong> (<code>torch.Tensor</code> of varying shape depending on the modality, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should be in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.</li>
<li><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>. In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.</li>
<li><strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum number of tokens to generate, ignoring the number of tokens in the prompt.</li>
<li><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.min_length</code> or 10 if the config does not set any value) &#x2014; The minimum length of the sequence to be generated.</li>
<li><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.do_sample</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to use sampling; use greedy decoding otherwise.</li>
<li><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.</li>
<li><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.num_beams</code> or 1 if the config does not set any value) &#x2014; Number of beams for beam search. 1 means no beam search.</li>
<li><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.temperature</code> or 1.0 if the config does not set any value) &#x2014; The value used to modulate the next token probabilities.</li>
<li><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.top_k</code> or 50 if the config does not set any value) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.</li>
<li><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.top_p</code> or 1.0 if the config does not set any value) &#x2014; If set to float &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.</li>
<li><strong>typical_p</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.typical_p</code> or 1.0 if the config does not set any value) &#x2014; The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it takes no effect. See <a href="https://arxiv.org/pdf/2202.00666.pdf" rel="nofollow">this paper</a> for more details.</li>
<li><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.repetition_penalty</code> or 1.0 if the config does not set any value) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.</li>
<li><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.pad_token_id</code>) &#x2014; The id of the <em>padding</em> token.</li>
<li><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.bos_token_id</code>) &#x2014; The id of the <em>beginning-of-sequence</em> token.</li>
<li><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.eos_token_id</code>) &#x2014; The id of the <em>end-of-sequence</em> token.</li>
<li><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.length_penalty</code> or 1.0 if the config does not set any value) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.</li>
<li><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.no_repeat_ngram_size</code> or 0 if the config does not set any value) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.</li>
<li><strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.encoder_no_repeat_ngram_size</code> or 0 if the config does not set any value) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.</li>
<li><strong>bad_words_ids</strong> (<code>List[List[int]]</code>, <em>optional</em>, defaults to <code>model.config.bad_words_ids</code>) &#x2014; List of token ids that are not allowed to be generated.
In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_words_ids(<code>List[List[int]]</code></strong> or <code>List[List[List[int]]]</code>, <em>optional</em>) &#x2014; List of token ids that must be generated. If given a <code>List[List[int]]</code>, this is treated as a simple list of words that must be included, the opposite to <code>bad_words_ids</code>. 
If given <code>List[List[List[int]]]</code>, this triggers a <a href="https://github.com/huggingface/transformers/issues/14081" rel="nofollow">disjunctive constraint</a>, where one can allow different forms of each word.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to <code>model.config.num_return_sequences</code> or 1 if the config does not set any value) &#x2014; The number of independently computed returned sequences for each element in the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.max_time(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.max_time(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_time(<code>float</code>,</strong> <em>optional</em>) &#x2014; The maximum amount of time you allow the computation to run for in seconds. 
generation will still finish the current pass after allocated time has been passed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token. <a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.use_cache" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.num_beam_groups</code> or 1 if the config does not set any value) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
<a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.diversity_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.diversity_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to <code>model.config.diversity_penalty</code> or 0.0 if the config does not set any value) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.prefix_allowed_tokens_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.prefix_allowed_tokens_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prefix_allowed_tokens_fn</strong> (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>) &#x2014; If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments: the batch ID <code>batch_id</code> and <code>input_ids</code>. 
It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID <code>batch_id</code> and the previously generated tokens <code>input_ids</code>. This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. If a logit processor is passed that is already created with the arguments or a model&#x2019;s config, an error is thrown. This feature is intended for advanced users. renormalize_logits &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones).
It&#x2019;s highly recommended to set this flag to <code>True</code> as the search algorithms assume the score logits are normalized, but some logit processors or warpers break the normalization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. If a stopping criterion is passed that is already created with the arguments or a model&#x2019;s config, an error is thrown.
This feature is intended for advanced users.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.constraints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.constraints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constraints</strong> (<code>List[Constraint]</code>, <em>optional</em>) &#x2014; Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by <code>Constraint</code> objects, in the most sensible way possible.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_attentions</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_hidden_states</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.output_scores</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.return_dict_in_generate</code> or <code>False</code> if the config does not set any value) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.forced_bos_token_id</code>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.forced_eos_token_id</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.remove_invalid_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.remove_invalid_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>model.config.remove_invalid_values</code>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. 
Note that using <code>remove_invalid_values</code> can slow down generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exponential_decay_length_penalty</strong> (<code>tuple(int, float)</code>, <em>optional</em>, defaults to <code>model.config.exponential_decay_length_penalty</code>) &#x2014; This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. 
The tuple shall consist of: <code>(start_index, decay_factor)</code> where <code>start_index</code> indicates where the penalty starts and <code>decay_factor</code> represents the factor of exponential decay.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.suppress_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.suppress_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>suppress_tokens</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.suppress_tokens</code>) &#x2014; A list of tokens that will be suppressed at generation. The <code>SupressTokens</code> logit processor will set their log probs to <code>-inf</code> so that they are not sampled.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.begin_suppress_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.begin_suppress_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>begin_suppress_tokens</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.begin_suppress_tokens</code>) &#x2014; A list of tokens that will be suppressed at the beginning of the generation.
The <code>SupressBeginTokens</code> logit processor will set their log probs to <code>-inf</code> so that they are not sampled.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.forced_decoder_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.forced_decoder_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_decoder_ids</strong> (<code>List[int]</code>, <em>optional</em>, defaults to <code>model.config.forced_decoder_ids</code>) &#x2014; A list of tokens that will be forced as beginning tokens, before sampling.</p> <p>model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with <em>decoder_</em>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.generation_utils.GenerationMixin.generate.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>torch.LongTensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>,</li> <li><a
href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a></li> </ul> <p>If the model is an encoder-decoder model (<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a></li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:</p> <ul><li><em>greedy decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li> <li><em>beam-search multinomial sampling</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a> if <code>num_beams&gt;1</code> and <code>do_sample=True</code>.</li> <li><em>diverse beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, if <code>num_beams&gt;1</code> and <code>num_beam_groups&gt;1</code>.</li> <li><em>constrained beam-search decoding</em> by calling <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>, if <code>constraints!=None</code> or <code>force_words_ids!=None</code>.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Apart from <code>inputs</code>, all the arguments below will default to the value of the attribute of the same name as defined in the model’s config (<code>config.json</code>) which in turn defaults to the <a 
href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model.</p></div> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <p>Examples:</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.generate.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Greedy Decoding:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = 
model.generate(input_ids, do_sample=<span class="hljs-literal">False</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\n&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.generate.example-2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.example-2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Multinomial Sampling:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get rid of discrimination,&quot; said Rep. Mark Pocan (D-Wis.).\n\n&quot;Just look at the&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.generate.example-3" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.example-3"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Beam-search decoding:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sentence = <span class="hljs-string">&quot;Paris is one of the densest populated areas in Europe.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, num_beams=<span class="hljs-number">5</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Paris ist eines der dichtesten besiedelten Gebiete Europas.&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.greedy_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>greedy_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.greedy_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.greedy_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L1637" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.input_ids" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific keyword arguments will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>greedy decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.greedy_search.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a PAD token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;It might be possible to&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">10</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.greedy_search( <span class="hljs-meta">... </span> input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&quot;It might be possible to get a better understanding of the nature of the problem, but it&#x27;s not&quot;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sample</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.sample" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.sample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L1865" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_warper<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>multinomial sampling</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.sample.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;Today is a beautiful day, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">15</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.sample( <span class="hljs-meta">... </span> input_ids, <span class="hljs-meta">... </span> logits_processor=logits_processor, <span class="hljs-meta">... </span> logits_warper=logits_warper, <span class="hljs-meta">... </span> stopping_criteria=stopping_criteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.beam_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2117" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.beam_search.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.beam_sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>beam_sample</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.beam_sample" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.beam_sample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2426" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_warper<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.input_ids" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 
0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>beam search multinomial sampling</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.beam_sample.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... 
</span> [MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id)] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_sample( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.group_beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.group_beam_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.group_beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L2742" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> 
</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)</p> <p>model_kwargs &#x2014; Additional model specific kwargs that will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>diverse beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run diverse beam search using 6 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">6</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span> num_beam_groups=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor(<span class="hljs-number">5.5</span>, num_beams=<span class="hljs-number">6</span>, num_beam_groups=<span class="hljs-number">3</span>), <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.group_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.constrained_beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>constrained_beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.constrained_beam_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L3104" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constrained_beam_scorer<span class="opacity-60">: ConstrainedBeamSearchScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constrained_beam_scorer</strong> (<code>ConstrainedBeamSearchScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. 
For more information, the documentation of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.ConstrainedBeamSearchScorer">ConstrainedBeamSearchScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>constrained beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <div class="relative group rounded-md"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> ConstrainedBeamSearchScorer, <span class="hljs-meta">... </span> PhrasalConstraint, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_str = <span class="hljs-string">&quot;Sie&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_token_ids = tokenizer.encode(constraint_str)[:-<span class="hljs-number">1</span>] <span class="hljs-comment"># slice to remove eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = ConstrainedBeamSearchScorer( <span class="hljs-meta">... 
</span> batch_size=<span class="hljs-number">1</span>, num_beams=num_beams, device=model.device, constraints=constraints <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.constrained_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt sind Sie?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div></div> <h2 class="relative group"><a id="transformers.generation_tf_utils.TFGenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFGenerationMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_tf_utils.TFGenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.generation_tf_utils.</span><span class="font-semibold">TFGenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_tf_utils.TFGenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_tf_utils.TFGenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L351" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A class containing all of the functions supporting generation, to be used as a mixin in <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_tf_utils.TFGenerationMixin.generate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_tf_utils.TFGenerationMixin.generate" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_tf_utils.TFGenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L375" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_new_tokens<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repetition_penalty<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">bad_words_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_return_sequences<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_bos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or 
<code>tf.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, `(batch_size, sequence_length, &#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_dim)`</strong> or <code>(batch_size, num_channels, height, width)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. 
For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>. In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.max_new_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.max_new_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. 
negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int]," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int],"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of <code>dtype=tf.int32</code> and shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens.</p> <p>If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.use_cache" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.use_cache"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_cache</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.generation_tf_utils.TFGenerationMixin.generate.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> or <code>tf.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>tf.Tensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><code>TFGreedySearchDecoderOnlyOutput</code>,</li> <li><code>TFSampleDecoderOnlyOutput</code>,</li> <li><code>TFBeamSearchDecoderOnlyOutput</code>,</li> <li><code>TFBeamSampleDecoderOnlyOutput</code></li> </ul> <p>If the model is an encoder-decoder model (<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><code>TFGreedySearchEncoderDecoderOutput</code>,</li> <li><code>TFSampleEncoderDecoderOutput</code>,</li> <li><code>TFBeamSearchEncoderDecoderOutput</code>,</li> <li><code>TFBeamSampleEncoderDecoderOutput</code></li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.</p> <p>Adapted in part from <a href="https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529" rel="nofollow">Facebook’s XLM beam search code</a>.</p> <p>Apart from <code>input_ids</code> and <code>attention_mask</code>, all the arguments below will default to the value of the attribute of the same name inside the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model. 
The default values indicated are the default values of those config.</p> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <div class="relative group rounded-md"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> outputs = model.generate(max_length=<span class="hljs-number">40</span>) <span class="hljs-comment"># do greedy decoding</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;openai-gpt&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span 
class="hljs-string">&quot;openai-gpt&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, num_beams=<span class="hljs-number">5</span>, num_return_sequences=<span class="hljs-number">3</span>, temperature=<span class="hljs-number">1.5</span> ) <span class="hljs-comment"># generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context &#x27;The dog&#x27;</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">40</span>, temperature=<span class="hljs-number">0.7</span>, num_return_sequences=<span class="hljs-number">3</span>, do_sample=<span class="hljs-literal">True</span> ) <span class="hljs-comment"># generate 3 candidates using sampling</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;ctrl&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;ctrl&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;Legal My neighbor is&quot;</span> <span class="hljs-comment"># &quot;Legal&quot; is one of the control codes for ctrl</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">50</span>, temperature=<span class="hljs-number">0.7</span>, repetition_penalty=<span class="hljs-number">1.2</span> ) <span class="hljs-comment"># 
generate sequences</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;gpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;My cute dog&quot;</span> bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> bad_word <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;idiot&quot;</span>, <span class="hljs-string">&quot;stupid&quot;</span>, <span class="hljs-string">&quot;shut up&quot;</span>] ] input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">100</span>, do_sample=<span class="hljs-literal">True</span>, bad_words_ids=bad_words_ids ) <span class="hljs-comment"># generate sequences without allowing bad_words to be generated</span><!-- HTML_TAG_END --></pre></div></div></div></div> <h2 class="relative group"><a id="transformers.generation_flax_utils.FlaxGenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxGenerationMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 
0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxGenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L125" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A class containing all functions for auto-regressive text generation, to be used as a mixin in <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>.</p> <p>The class exposes <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate">generate()</a>, which can be used for:</p> <ul><li><em>greedy decoding</em> by calling <code>_greedy_search()</code> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <code>_sample()</code> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <code>~generation_utils.FlaxGenerationMixin._beam_search</code> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGenerationMixin.generate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L211" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_new_tokens<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prng_key<span class="opacity-60">: typing.Optional[jax._src.numpy.ndarray.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trace<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict[str, jax._src.numpy.ndarray.ndarray], NoneType] = None</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length the generated tokens can have. Corresponds to the length of the input prompt + <code>max_new_tokens</code>. 
In general, prefer the use of <code>max_new_tokens</code>, which ignores the number of tokens in the prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_new_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_new_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trace</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to trace generation. 
Setting <code>trace=False</code> should only be used for debugging and will lead to a considerably slower runtime.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Dict[str, jnp.ndarray]</code>, <em>optional</em>) &#x2014; Optionally the model parameters can be passed. Can be useful for parallelized generation. model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with <em>decoder_</em>. Also accepts <code>encoder_outputs</code> to skip encoder part.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:</p> <ul><li><em>greedy decoding</em> by calling <code>_greedy_search()</code> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <code>_sample()</code> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <code>~generation_utils.FlaxGenerationMixin._beam_search</code> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Apart from <code>inputs</code>, all the arguments below will default to the value of the attribute of the same name as defined in the model’s config (<code>config.json</code>) which in turn defaults to the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model.</p></div> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <div class="relative group rounded-md"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black 
border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_context = <span class="hljs-string">&quot;The dog&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># encode input context</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_context, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate candidates using sampling</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids=input_ids, max_length=<span class="hljs-number">20</span>, top_k=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div></div></div> <script type="module" data-hydrate="18la6cn"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="18la6cn"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/text_generation.mdx-hf-doc-builder.js") ], params: {} } }); </script>
46
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/pipelines.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.pipeline&quot;,&quot;title&quot;:&quot;The pipeline abstraction&quot;},{&quot;local&quot;:&quot;pipeline-batching&quot;,&quot;title&quot;:&quot;Pipeline batching&quot;},{&quot;local&quot;:&quot;pipeline-chunk-batching&quot;,&quot;title&quot;:&quot;Pipeline chunk batching&quot;},{&quot;local&quot;:&quot;pipeline-custom-code&quot;,&quot;title&quot;:&quot;Pipeline custom code&quot;},{&quot;local&quot;:&quot;implementing-a-pipeline&quot;,&quot;title&quot;:&quot;Implementing a pipeline&quot;},{&quot;local&quot;:&quot;the-task-specific-pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.AudioClassificationPipeline&quot;,&quot;title&quot;:&quot;AudioClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.AutomaticSpeechRecognitionPipeline&quot;,&quot;title&quot;:&quot;AutomaticSpeechRecognitionPipeline&quot;},{&quot;local&quot;:&quot;transformers.Conversation&quot;,&quot;title&quot;:&quot;ConversationalPipeline&quot;},{&quot;local&quot;:&quot;transformers.DocumentQuestionAnsweringPipeline&quot;,&quot;title&quot;:&quot;DocumentQuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.FeatureExtractionPipeline&quot;,&quot;title&quot;:&quot;FeatureExtractionPipeline&quot;},{&quot;local&quot;:&quot;transformers.FillMaskPipeline&quot;,&quot;title&quot;:&quot;FillMaskPipeline&quot;},{&quot;local&quot;:&quot;transformers.ImageClassificationPipeline&quot;,&quot;title&quot;:&quot;ImageClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ImageSegmentationPipeline&quot;,&quot;title&quot;:&quot;ImageSegmentationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ImageToTextPipeline&quot;,&quot;title&quot;:&quot;ImageToTextPipeline&quot;},{&quot;local&quot;:&quot;transformers.TokenClassificationPipeline&quot;,&quot;title&quot;:&quot;NerPipeline&quot;},{&quot;local&quot;:&quot;transformers.ObjectDetectionPipeline&quot;,&quot;title&quot;:&quot;ObjectDetectionPipeline&quot;},{&quot;local&quot;:&quot;transformers.QuestionAnsweringPipeline&quot;,&quot;title&quot;:&quot;QuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.SummarizationPipeline&quot;,&quot;title&quot;:&quot;SummarizationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TableQuestionAnsweringPipeline&quot;,&quot;title&quot;:&quot;TableQuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.TextClassificationPipeline&quot;,&quot;title&quot;:&quot;TextClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TextGenerationPipeline&quot;,&quot;title&quot;:&quot;TextGenerationPipeline&quot;},{&quot;local&quot;:&quot;transformers.Text2TextGenerationPipeline&quot;,&quot;title&quot;:&quot;Text2TextGenerationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TokenClassificationPipeline&quot;,&quot;title&quot;:&quot;TokenClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TranslationPipeline&quot;,&quot;title&quot;:&quot;TranslationPipeline&quot;},{&quot;local&quot;:&quot;transformers.VisualQuestionAnsweringPipeline&quot;,&quot;title&quot;:&quot;VisualQuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.ZeroShotClassificationPipeline&quot;,&quot;title&quot;:&quot;ZeroShotClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ZeroShotImageClassificationPipeline&quot;,&quot;title&quot;:&qu
ot;ZeroShotImageClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ZeroShotObjectDetectionPipeline&quot;,&quot;title&quot;:&quot;ZeroShotObjectDetectionPipeline&quot;}],&quot;title&quot;:&quot;The task specific pipelines&quot;},{&quot;local&quot;:&quot;transformers.Pipeline&quot;,&quot;title&quot;:&quot;Parent class: `Pipeline`&quot;}],&quot;title&quot;:&quot;Pipelines&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/pipelines.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipelines </span></h1> <p>The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. 
See the <a href="../task_summary">task summary</a> for examples of use.</p> <p>There are two categories of pipeline abstractions to be aware about:</p> <ul><li><p>The <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> which is the most powerful object encapsulating all other pipelines.</p></li> <li><p>The other task-specific pipelines:</p> <ul><li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline">DocumentQuestionAnsweringPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageSegmentationPipeline">ImageSegmentationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageToTextPipeline">ImageToTextPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ObjectDetectionPipeline">ObjectDetectionPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.VisualQuestionAnsweringPipeline">VisualQuestionAnsweringPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotImageClassificationPipeline">ZeroShotImageClassificationPipeline</a></li> <li><a 
href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotObjectDetectionPipeline">ZeroShotObjectDetectionPipeline</a></li></ul></li></ul> <h2 class="relative group"><a id="transformers.pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>The pipeline abstraction </span></h2> <p>The <em>pipeline</em> abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life.</p> <p>Simple call on one item:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]<!-- HTML_TAG_END --></pre></div> <p>If you want to use a specific model from the <a href="https://huggingface.co" rel="nofollow">hub</a> you can ignore the task if the model on the hub already defines it:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 
text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(model=<span class="hljs-string">&quot;roberta-large-mnli&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]<!-- HTML_TAG_END --></pre></div> <p>To call a pipeline on many items, you can either call with a <em>list</em>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe([<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>, <span class="hljs-string">&quot;This restaurant is aweful&quot;</span>]) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;NEGATIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9996669292449951</span>}]<!-- HTML_TAG_END --></pre></div> <p>To iterate of full 
datasets it is recommended to use a <code>dataset</code> directly. This means you don’t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. If it doesn’t don’t hesitate to create an issue.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> datasets <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, device=<span class="hljs-number">0</span>) dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>) <span class="hljs-comment"># KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item</span> <span class="hljs-comment"># as we&#x27;re not interested in the *target* part of the dataset.</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(KeyDataset(dataset, <span class="hljs-string">&quot;file&quot;</span>))): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span><!-- HTML_TAG_END --></pre></div> <p>For ease of use, a generator is also possible:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">data</span>(): <span class="hljs-keyword">while</span> <span class="hljs-literal">True</span>: <span class="hljs-comment"># This could come from a dataset, a database, a queue or HTTP request</span> <span class="hljs-comment"># in a server</span> <span class="hljs-comment"># Caveat: because this is iterative, you cannot use `num_workers &gt; 1` variable</span> <span class="hljs-comment"># to use multiple threads to preprocess data. You can still have 1 thread that</span> <span class="hljs-comment"># does the preprocessing while the main runs the big inference</span> <span class="hljs-keyword">yield</span> <span class="hljs-string">&quot;This is a test&quot;</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(data()): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span><!-- HTML_TAG_END --></pre></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipeline"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.pipeline</span></h4><!-- HTML_TAG_END --> <a id="transformers.pipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/__init__.py#L450" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Optional = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: typing.Union[str, transformers.configuration_utils.PretrainedConfig, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Union[str, transformers.tokenization_utils.PreTrainedTokenizer, transformers.tokenization_utils_fast.PreTrainedTokenizerFast, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Union[str, ForwardRef(&#39;SequenceFeatureExtractor&#39;), NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">revision<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_fast<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: 
typing.Union[str, bool, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: typing.Union[int, str, ForwardRef(&#39;torch.device&#39;), NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device_map<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">torch_dtype<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trust_remote_code<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_kwargs<span class="opacity-60">: typing.Dict[str, typing.Any] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pipeline_class<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>) &#x2014; The task defining which pipeline will be returned. 
Currently accepted tasks are:</p> <ul> <li><code>&quot;audio-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a>.</li> <li><code>&quot;automatic-speech-recognition&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a>.</li> <li><code>&quot;conversational&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>.</li> <li><code>&quot;feature-extraction&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a>.</li> <li><code>&quot;fill-mask&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a>:.</li> <li><code>&quot;image-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a>.</li> <li><code>&quot;question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a>.</li> <li><code>&quot;table-question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a>.</li> <li><code>&quot;text2text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a>.</li> <li><code>&quot;text-classification&quot;</code> (alias <code>&quot;sentiment-analysis&quot;</code> available): will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a>.</li> <li><code>&quot;text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a>:.</li> <li><code>&quot;token-classification&quot;</code> (alias <code>&quot;ner&quot;</code> available): will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a>.</li> <li><code>&quot;translation&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;translation_xx_to_yy&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;summarization&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a>.</li> <li><code>&quot;zero-shot-classification&quot;</code>: will return a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.model" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, <em>optional</em>) &#x2014; The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> (for PyTorch) or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> (for TensorFlow).</p> <p>If not provided, the default for the <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; The configuration that will be used by the pipeline to instantiate the model. 
This can be a model identifier or an actual pretrained model configuration inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>.</p> <p>If not provided, the default configuration file for the requested model will be used. That means that if <code>model</code> is given, its default configuration will be used. However, if <code>model</code> is not supplied, this <code>task</code>&#x2019;s default model&#x2019;s config is used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>, <em>optional</em>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.</p> <p>If not provided, the default tokenizer for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default tokenizer for <code>config</code> is loaded (if it is a string). 
However, if <code>config</code> is also not given or not a string, then the default tokenizer for the given <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> (<code>str</code> or <code>PreTrainedFeatureExtractor</code>, <em>optional</em>) &#x2014; The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from <code>PreTrainedFeatureExtractor</code>.</p> <p>Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed.</p> <p>If not provided, the default feature extractor for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default feature extractor for <code>config</code> is loaded (if it is a string). 
However, if <code>config</code> is also not given or not a string, then the default feature extractor for the given <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; When passing a task name or a string model identifier: The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.use_fast" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.use_fast"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_fast</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a Fast tokenizer if possible (a <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code> or <code>str</code> or <code>torch.device</code>) &#x2014; Defines the device (<em>e.g.</em>, <code>&quot;cpu&quot;</code>, <code>&quot;cuda:1&quot;</code>, <code>&quot;mps&quot;</code>, or a GPU ordinal rank like <code>1</code>) on which this pipeline will be allocated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.device_map" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.device_map"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]</code>, <em>optional</em>) &#x2014; Sent directly as <code>model_kwargs</code> (just a simpler shortcut). When <code>accelerate</code> library is present, set <code>device_map=&quot;auto&quot;</code> to compute the most optimized <code>device_map</code> automatically. 
<a href="https://huggingface.co/docs/accelerate/main/en/big_modeling#accelerate.cpu_offload" rel="nofollow">More information</a></p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Do not use <code>device_map</code> AND <code>device</code> at the same time as they will conflict</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.torch_dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.torch_dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014; Sent directly as <code>model_kwargs</code> (just a simpler shortcut) to use the available precision for this model (<code>torch.float16</code>, <code>torch.bfloat16</code>, &#x2026; or <code>&quot;auto&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.trust_remote_code" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.trust_remote_code"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trust_remote_code</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. 
This option should only be set to <code>True</code> for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs &#x2014; Additional dictionary of keyword arguments passed along to the model&#x2019;s <code>from_pretrained(..., **model_kwargs)</code> function. kwargs &#x2014; Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.pipeline.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A suitable pipeline for the task.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Utility factory method to build a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a>.</p> <p>Pipelines are made of:</p> <ul><li>A <a href="tokenizer">tokenizer</a> in charge of mapping raw textual input to token.</li> <li>A <a href="model">model</a> to make predictions from the inputs.</li> <li>Some (optional) post processing for enhancing model’s output.</li></ul> <div class="relative group rounded-md"><a id="transformers.pipeline.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div 
class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline, AutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Sentiment analysis pipeline</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Question answering pipeline, specifying the checkpoint identifier</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;question-answering&quot;</span>, model=<span class="hljs-string">&quot;distilbert-base-cased-distilled-squad&quot;</span>, tokenizer=<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Named entity recognition pipeline, passing in a specific model and tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;ner&quot;</span>, model=model, tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="pipeline-batching" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-batching"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline batching </span></h2> <p>All pipelines can use batching. 
This will work whenever the pipeline uses its streaming ability (so when passing lists or <code>Dataset</code> or <code>generator</code>).</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">import</span> datasets dataset = datasets.load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>, name=<span class="hljs-string">&quot;plain_text&quot;</span>, split=<span class="hljs-string">&quot;unsupervised&quot;</span>) pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(KeyDataset(dataset, <span class="hljs-string">&quot;text&quot;</span>), batch_size=<span class="hljs-number">8</span>, truncation=<span class="hljs-string">&quot;only_first&quot;</span>): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># [{&#x27;label&#x27;: &#x27;POSITIVE&#x27;, &#x27;score&#x27;: 0.9998743534088135}]</span> <span class="hljs-comment"># Exactly the same output as before, but the content are passed</span> <span class="hljs-comment"># as batches to the model</span><!-- HTML_TAG_END --></pre></div> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>However, this is not automatically a win for performance. 
It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.</p> <p>Example where it’s mostly a speedup:</p></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> Dataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> dataset = MyDataset() <span class="hljs-keyword">for</span> batch_size <span class="hljs-keyword">in</span> [<span class="hljs-number">1</span>, <span class="hljs-number">8</span>, <span class="hljs-number">64</span>, <span class="hljs-number">256</span>]: <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;-&quot;</span> * <span class="hljs-number">30</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Streaming batch_size=<span class="hljs-subst">{batch_size}</span>&quot;</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=batch_size), total=<span class="hljs-built_in">len</span>(dataset)): <span class="hljs-keyword">pass</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" 
focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-section"># On GTX 970 ------------------------------</span> Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26&lt;00:00, 187.52it/s] <span class="hljs-code">------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04&lt;00:00, 1205.95it/s] ------------------------------</span> Streaming batch<span class="hljs-emphasis">_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02&lt;00:00, 2478.24it/s] ------------------------------ Streaming batch_</span>size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01&lt;00:00, 2554.43it/s] (diminishing returns, saturated the GPU)<!-- HTML_TAG_END --></pre></div> <p>Example where it’s most a slowdown:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">if</span> i % <span class="hljs-number">64</span> == <span class="hljs-number">0</span>: n = <span 
class="hljs-number">100</span> <span class="hljs-keyword">else</span>: n = <span class="hljs-number">1</span> <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> * n<!-- HTML_TAG_END --></pre></div> <p>This is a occasional very long sentence compared to the other. In that case, the <strong>whole</strong> batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment">------------------------------</span> Streaming no batching <span class="hljs-number">100</span>%|█████████████████████████████████████████████████████████████████████| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">05</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">183.69</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">8</span> <span class="hljs-number">100</span>%|█████████████████████████████████████████████████████████████████████| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">03</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">265.74</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">64</span> <span class="hljs-number">100</span>%|██████████████████████████████████████████████████████████████████████| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">26</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">37.80</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">256</span> <span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">1000</span> [<span 
class="hljs-number">00</span>:<span class="hljs-number">00</span><span class="hljs-meta">&lt;?</span>, ?<span class="hljs-keyword">it</span>/s] Traceback (most recent call <span class="hljs-keyword">last</span>): File <span class="hljs-string">&quot;/home/nicolas/src/transformers/test.py&quot;</span>, <span class="hljs-built_in">line</span> <span class="hljs-number">42</span>, <span class="hljs-keyword">in</span> &lt;module&gt; <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=<span class="hljs-number">256</span>), total=<span class="hljs-built_in">len</span>(dataset)): .... q = q / math.<span class="hljs-built_in">sqrt</span>(dim_per_head) <span class="hljs-comment"># (bs, n_heads, q_length, dim_per_head)</span> RuntimeError: CUDA out <span class="hljs-keyword">of</span> memory. Tried <span class="hljs-built_in">to</span> allocate <span class="hljs-number">376.00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">3.95</span> GiB total capacity; <span class="hljs-number">1.72</span> GiB already allocated; <span class="hljs-number">354.88</span> MiB free; <span class="hljs-number">2.46</span> GiB reserved <span class="hljs-keyword">in</span> total <span class="hljs-keyword">by</span> PyTorch)<!-- HTML_TAG_END --></pre></div> <p>There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:</p> <p>For users, a rule of thumb is:</p> <ul><li><p><strong>Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the only way to go.</strong></p></li> <li><p>If you are latency constrained (live product doing inference), don’t batch</p></li> <li><p>If you are using CPU, don’t batch.</p></li> <li><p>If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:</p> <ul><li>If you have no clue about the size of the sequence_length (“natural” data), by default don’t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don’t control the sequence_length.)</li> <li>If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.</li> <li>The larger the GPU the more likely batching is going to be more interesting</li></ul></li> <li><p>As soon as you enable batching, make sure you can handle OOMs nicely.</p></li></ul> <h2 class="relative group"><a id="pipeline-chunk-batching" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-chunk-batching"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>Pipeline chunk batching </span></h2> <p><code>zero-shot-classification</code> and <code>question-answering</code> are slightly specific in the sense, that a single input might yield multiple forward pass of a model. Under normal circumstances, this would yield issues with <code>batch_size</code> argument.</p> <p>In order to circumvent this issue, both of these pipelines are a bit specific, they are <code>ChunkPipeline</code> instead of regular <code>Pipeline</code>. In short:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)<!-- HTML_TAG_END --></pre></div> <p>Now becomes:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->all_model_outputs = [] <span class="hljs-keyword">for</span> preprocessed <span class="hljs-keyword">in</span> pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs)<!-- HTML_TAG_END --></pre></div> <p>This should be very transparent to your code because the pipelines are used in the same way.</p> <p>This is a simplified view, since the pipeline can handle automatically the batch to ! 
Meaning you don’t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the <code>batch_size</code> independently of the inputs. The caveats from the previous section still apply.</p> <h2 class="relative group"><a id="pipeline-custom-code" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-custom-code"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline custom code </span></h2> <p>If you want to override a specific pipeline.</p> <p>Don’t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so <code>transformers</code> could maybe support your use case.</p> <p>If you want to try simply you can:</p> <ul><li>Subclass your pipeline of choice</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyPipeline</span>(<span class="hljs-title class_ inherited__">TextClassificationPipeline</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(): <span class="hljs-comment"># Your code goes here</span> scores = scores * <span class="hljs-number">100</span> <span class="hljs-comment"># And here</span> my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) 
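<span class="hljs-comment"># note (added sketch, not part of the original snippet): a real override is postprocess(self, model_outputs, **kwargs) and must return the final predictions</span>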
<span class="hljs-comment"># or if you use *pipeline* function, then:</span> my_pipeline = pipeline(model=<span class="hljs-string">&quot;xxxx&quot;</span>, pipeline_class=MyPipeline)<!-- HTML_TAG_END --></pre></div> <p>That should enable you to do all the custom code you want.</p> <h2 class="relative group"><a id="implementing-a-pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementing-a-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementing a pipeline </span></h2> <p><a href="../add_new_pipeline">Implementing a new pipeline</a></p> <h2 class="relative group"><a id="the-task-specific-pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#the-task-specific-pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>The task specific pipelines </span></h2> <h3 class="relative group"><a id="transformers.AudioClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>AudioClassificationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AudioClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AudioClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AudioClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AudioClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/audio_classification.py#L66" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 
rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Audio classification pipeline using any <code>AutoModelForAudioClassification</code>. This pipeline predicts the class of a raw waveform or an audio file. 
In case of an audio file, ffmpeg should be installed to support multiple audio formats.</p> <p>This pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;audio-classification&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=audio-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AudioClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.AudioClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AudioClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/audio_classification.py#L89" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[numpy.ndarray, bytes, str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list of <code>dict</code> with the following keys</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code>) &#x2014; The inputs is either a raw waveform (<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) at the correct sampling rate (no further check will be done) or a <code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system. 
If <em>inputs</em> is <code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is <code>None</code> or higher than the number of labels available in the model configuration, it will default to the number of labels.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.AudioClassificationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list of <code>dict</code> with the following keys</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li><strong>label</strong> (<code>str</code>) — The label predicted.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Classify the sequence(s) given as inputs. 
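</p> <p>For illustration, a minimal sketch of loading this pipeline through <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> with the <code>&quot;audio-classification&quot;</code> task identifier; the audio path below is a placeholder, and the checkpoint resolved by default depends on your installation:</p> <pre><!-- HTML_TAG_START -->from transformers import pipeline

# Build an audio-classification pipeline (a default checkpoint is downloaded when none is specified).
classifier = pipeline("audio-classification")

# "sample.flac" is a placeholder path to a local audio file; ffmpeg must be installed to decode it.
predictions = classifier("sample.flac", top_k=2)

# Each prediction is a dict with "label" and "score" keys.
for prediction in predictions:
    print(prediction["label"], prediction["score"])<!-- HTML_TAG_END --></pre> <p>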
See the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.AutomaticSpeechRecognitionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutomaticSpeechRecognitionPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AutomaticSpeechRecognitionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AutomaticSpeechRecognitionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AutomaticSpeechRecognitionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AutomaticSpeechRecognitionPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 
0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/automatic_speech_recognition.py#L68" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Union[ForwardRef(&#39;SequenceFeatureExtractor&#39;), str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a>) &#x2014; The feature extractor that will be used by the pipeline to encode waveform for the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group 
<a id=">
flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_length_s</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The input length for each chunk. If <code>chunk_length_s = 0</code> then chunking is disabled (default). Only available for CTC models, e.g. <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC">Wav2Vec2ForCTC</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For more information on how to effectively use <code>chunk_length_s</code>, please have a look at the <a href="https://huggingface.co/blog/asr-chunking" rel="nofollow">ASR chunking blog post</a>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.stride_length_s" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.stride_length_s"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride_length_s</strong> (<code>float</code>, <em>optional</em>, defaults to <code>chunk_length_s / 6</code>) &#x2014; The length of stride on the left and right of each chunk. Used only with <code>chunk_length_s &gt; 0</code>.
This enables the model to <em>see</em> more context and infer letters better than without this context but the pipeline discards the stride bits at the end to make the final reconstitution as perfect as possible.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>For more information on how to effectively use <code>stride_length_s</code>, please have a look at the <a href="https://huggingface.co/blog/asr-chunking" rel="nofollow">ASR chunking blog post</a>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder</strong> (<code>pyctcdecode.BeamSearchDecoderCTC</code>, <em>optional</em>) &#x2014; <a href="https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180" rel="nofollow">PyCTCDecode&#x2019;s BeamSearchDecoderCTC</a> can be passed for language model boosted decoding. 
See <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ProcessorWithLM">Wav2Vec2ProcessorWithLM</a> for more information.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pipeline that aims at extracting spoken text contained within some audio.</p> <p>The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for to support multiple audio formats</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AutomaticSpeechRecognitionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.AutomaticSpeechRecognitionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/automatic_speech_recognition.py#L142" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[numpy.ndarray, bytes, str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code> or <code>dict</code>) &#x2014; The inputs is either:<ul> <li><code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system.</li> <li><code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.</li> <li>(<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) Raw audio at the correct sampling rate (no further check will be done)</li> <li><code>dict</code> form can be used to pass raw audio sampled at arbitrary <code>sampling_rate</code> and let this pipeline do the resampling. The dict must be in the format <code>{&quot;sampling_rate&quot;: int, &quot;raw&quot;: np.array}</code> with optionally a <code>&quot;stride&quot;: (left: int, right: int)</code> that can ask the pipeline to ignore the first <code>left</code> samples and last <code>right</code> samples in decoding (but still use them at inference to provide more context to the model).
Only use <code>stride</code> with CTC models.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_timestamps</strong> (<em>optional</em>, <code>str</code>) &#x2014; Only available for pure CTC models. If set to <code>&quot;char&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every character in the text. For instance if you get <code>[{&quot;text&quot;: &quot;h&quot;, &quot;timestamps&quot;: (0.5,0.6), {&quot;text&quot;: &quot;i&quot;, &quot;timestamps&quot;: (0.7, 0.9)}]</code>, then it means the model predicts that the letter &#x201C;h&#x201D; was pronounced after <code>0.5</code> and before <code>0.6</code> seconds. If set to <code>&quot;word&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every word in the text. For instance if you get <code>[{&quot;text&quot;: &quot;hi &quot;, &quot;timestamps&quot;: (0.5,0.9), {&quot;text&quot;: &quot;there&quot;, &quot;timestamps&quot;: (1.0, 1.5)}]</code>, then it means the model predicts that the word &#x201C;hi&#x201D; was pronounced after <code>0.5</code> and before <code>0.9</code> seconds.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.AutomaticSpeechRecognitionPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary with the following keys:</p> <ul> <li><strong>text</strong> (<code>str</code>) — The recognized text.</li> <li><strong>chunks</strong> (<em>optional</em>, <code>List[Dict]</code>) — When using <code>return_timestamps</code>, the <code>chunks</code> will become a list containing all the various text chunks identified by the model, <em>e.g.</em> <code>[&#123;"text": "hi ", "timestamps": (0.5,0.9), &#123;"text": "there", "timestamps": (1.0, 1.5)&#125;]</code>. The original full text can roughly be recovered by doing <code>"".join(chunk["text"] for chunk in output["chunks"])</code>.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Transcribe the audio sequence(s) given as inputs to text.
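</p> <p>As a sketch only (the audio path is a placeholder, and the snippet assumes the usual <code>&quot;automatic-speech-recognition&quot;</code> task identifier together with a CTC checkpoint, since chunking and timestamps are documented above as CTC-only), long-form transcription could look like this:</p> <pre><!-- HTML_TAG_START -->from transformers import pipeline

# Build an ASR pipeline; a default checkpoint is used when none is specified.
asr = pipeline("automatic-speech-recognition")

# "interview.wav" is a placeholder path; ffmpeg is required to read it at the correct sampling rate.
# chunk_length_s / stride_length_s enable chunked inference on long audio, and
# return_timestamps="word" additionally returns per-word timestamps (CTC models only).
output = asr("interview.wav", chunk_length_s=30, stride_length_s=5, return_timestamps="word")

print(output["text"])  # the recognized text
for chunk in output.get("chunks", []):
    print(chunk)  # each chunk carries its own text and timestamps

# Raw audio sampled at an arbitrary rate can also be passed as a dict and resampled by the pipeline:
# asr({"sampling_rate": 44_100, "raw": waveform})  # waveform: np.ndarray of shape (n,)<!-- HTML_TAG_END --></pre> <p>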
See the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.Conversation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConversationalPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Conversation</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Conversation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L18" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conversation_id<span class="opacity-60">: UUID = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_user_inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generated_responses<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <em>optional</em>) &#x2014; The initial user input to start the conversation. 
If not provided, a user input needs to be provided manually using the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation.add_user_input">add_user_input()</a> method before the conversation can begin.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.conversation_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.conversation_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conversation_id</strong> (<code>uuid.UUID</code>, <em>optional</em>) &#x2014; Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the conversation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.past_user_inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.past_user_inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_user_inputs</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the user. 
You don&#x2019;t need to pass it manually if you use the pipeline interactively, but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.generated_responses" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.generated_responses"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>generated_responses</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the model. You don&#x2019;t need to pass it manually if you use the pipeline interactively, but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Utility class containing a conversation and its history. This class is meant to be used as an input to the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>. The conversation contains a number of utility functions to manage the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input before being passed to the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>.
This user input is either created when the class is instantiated, or by calling <code>conversational_pipeline.append_response(&quot;input&quot;)</code> after a conversation turn.</p> <div class="relative group rounded-md"><a id="transformers.Conversation.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conversation = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) <span class="hljs-comment"># Steps usually performed by the model when generating a response:</span> <span class="hljs-comment"># 1. Mark the user input as processed (moved to the history)</span> conversation.mark_processed() <span class="hljs-comment"># 2. 
Append a model response</span> conversation.append_response(<span class="hljs-string">&quot;The Big Lebowski.&quot;</span>) conversation.add_user_input(<span class="hljs-string">&quot;Is it good?&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.add_user_input"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_user_input</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.add_user_input" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.add_user_input"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span 
class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.add_user_input.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.add_user_input.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The user input for the next conversation round.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.add_user_input.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.add_user_input.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not existing and unprocessed user input should be overwritten when this function is called.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Add a user input to the conversation for the next round. 
This populates the internal <code>new_user_input</code> field.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.append_response"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>append_response</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.append_response" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.append_response"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">response<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span 
class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.append_response.response" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.append_response.response"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>response</strong> (<code>str</code>) &#x2014; The model generated response.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Append a response to the list of generated responses.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.iter_texts"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>iter_texts</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.iter_texts" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.iter_texts"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L124" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Iterates over all blobs of the conversation.</p> <p>Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. <code>is_user</code> is a <code>bool</code>, <code>text_chunk</code> is a <code>str</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.mark_processed"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>mark_processed</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.mark_processed" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.mark_processed"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L106" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Mark the conversation as processed (moves the content of <code>new_user_input</code> to <code>past_user_inputs</code>) and empties the <code>new_user_input</code> field.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConversationalPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConversationalPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConversationalPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConversationalPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L163" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.min_length_for_response" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.ConversationalPipeline.min_length_for_response"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length_for_response</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The minimum length (in number of tokens) for a response.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.minimum_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.minimum_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>minimum_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of tokens to leave for a response.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Multi-turn conversational pipeline.</p> <p>This conversational pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;conversational&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: <em>‘microsoft/DialoGPT-small’</em>, <em>‘microsoft/DialoGPT-medium’</em>, <em>‘microsoft/DialoGPT-large’</em>. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=conversational" rel="nofollow">huggingface.co/models</a>.</p> <div class="relative group rounded-md"><a id="transformers.ConversationalPipeline.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conversational_pipeline = pipeline(<span class="hljs-string">&quot;conversational&quot;</span>) conversation_1 = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) conversation_2 = Conversation(<span class="hljs-string">&quot;What&#x27;s the last book you have read?&quot;</span>) conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input(<span class="hljs-string">&quot;Is it an action movie?&quot;</span>) conversation_2.add_user_input(<span class="hljs-string">&quot;What is the genre of this book?&quot;</span>) conversational_pipeline([conversation_1, conversation_2])<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConversationalPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 
to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConversationalPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConversationalPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/conversational.py#L218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conversations<span class="opacity-60">: typing.Union[transformers.pipelines.conversational.Conversation, typing.List[transformers.pipelines.conversational.Conversation]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_workers<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a 
href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.__call__.conversations" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.__call__.conversations"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conversations</strong> (a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation">Conversation</a> or a list of <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation">Conversation</a>) &#x2014; Conversations to generate responses for.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.ConversationalPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Conversation(s) with updated generated responses for those containing a new user input.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Generate responses for the conversation(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.DocumentQuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DocumentQuestionAnsweringPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DocumentQuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.</span><span class="font-semibold">DocumentQuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DocumentQuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DocumentQuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/document_question_answering.py#L102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) 
&#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, while a positive integer will run the model on the associated CUDA device id. You can pass a native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Document Question Answering pipeline using any <code>AutoModelForDocumentQuestionAnswering</code>. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR’d words/boxes) as input instead of text context.</p> <p>This document question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;document-question-answering&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a document question answering task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=document-question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DocumentQuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.DocumentQuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DocumentQuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/document_question_answering.py#L171" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Union[ForwardRef(&#39;Image.Image&#39;), str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">question<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_boxes<span class="opacity-60">: typing.Tuple[str, typing.List[float]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A <code>dict</code> or a list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>str</code> or <code>PIL.Image</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
If given a single image, it can be broadcasted to multiple questions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.question" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.question"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>question</strong> (<code>str</code>) &#x2014; A question to ask of the document.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.word_boxes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.word_boxes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_boxes</strong> (<code>List[str, Tuple[float, float, float, float]]</code>, <em>optional</em>) &#x2014; A list of words and bounding boxes (normalized 0-&gt;1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). 
This allows you to reuse OCR&#x2019;d results across many invocations of the pipeline without having to re-run it each time.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than top_k answers if there are not enough options available within the context.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.doc_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.doc_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the words in the document are too long to fit with the question for the model, they will be split into several chunks with some overlap. 
This argument controls the size of that overlap.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.max_answer_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.max_answer_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.max_seq_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.max_seq_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. 
The context will be split in several chunks (using <code>doc_stride</code> as overlap) if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.max_question_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.max_question_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. It will be truncated if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.handle_impossible_answer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.handle_impossible_answer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; Language to use while running OCR. Defaults to English.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DocumentQuestionAnsweringPipeline.__call__.tesseract_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DocumentQuestionAnsweringPipeline.__call__.tesseract_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tesseract_config</strong> (<code>str</code>, <em>optional</em>) &#x2014; Additional flags to pass to Tesseract while running OCR.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.DocumentQuestionAnsweringPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A <code>dict</code> or a list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>score</strong> (<code>float</code>) — The probability associated with the answer.</li> <li><strong>start</strong> (<code>int</code>) — The start word index of the answer (in the OCR’d version of the input or provided <code>word_boxes</code>).</li> <li><strong>end</strong> (<code>int</code>) — The end word index of the answer (in the OCR’d version of the input or provided <code>word_boxes</code>).</li> <li><strong>answer</strong> (<code>str</code>) — The answer to the question.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Answer the question(s) given as inputs by using the document(s). 
A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the <code>word_boxes</code> are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run.</p> <p>You can invoke the pipeline several ways:</p> <ul><li><code>pipeline(image=image, question=question)</code></li> <li><code>pipeline(image=image, question=question, word_boxes=word_boxes)</code></li> <li><code>pipeline([{&quot;image&quot;: image, &quot;question&quot;: question}])</code></li> <li><code>pipeline([{&quot;image&quot;: image, &quot;question&quot;: question, &quot;word_boxes&quot;: word_boxes}])</code></li></ul></div></div> <h3 class="relative group"><a id="transformers.FeatureExtractionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeatureExtractionPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FeatureExtractionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionPipeline"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/feature_extraction.py#L7" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Optional[ForwardRef(&#39;SequenceFeatureExtractor&#39;)] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60">: ArgumentHandler = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: typing.Union[int, str, ForwardRef(&#39;torch.device&#39;)] = -1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">binary_output<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.</p> <p>This feature extraction pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the task identifier: <code>&quot;feature-extraction&quot;</code>.</p> <p>All models may be used for this pipeline. See a list of all models, including community-contributed models on <a href="https://huggingface.co/models" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/feature_extraction.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A nested list of <code>float</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) to get the features of.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.FeatureExtractionPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A nested list of <code>float</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The features computed by the model.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Extract the features of the input(s).</p></div></div>

FillMaskPipeline

class transformers.FillMaskPipeline (source: src/transformers/pipelines/fill_mask.py, line 34)

( model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: Optional[PreTrainedTokenizer] = None, feature_extractor: Optional["SequenceFeatureExtractor"] = None, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", args_parser: ArgumentHandler = None, device: Union[int, str, "torch.device"] = -1, binary_output: bool = False, **kwargs )

Parameters

- model (PreTrainedModel or TFPreTrainedModel) -- The model the pipeline uses to make predictions. This needs to be a model inheriting from PreTrainedModel for PyTorch or TFPreTrainedModel for TensorFlow.
- tokenizer (PreTrainedTokenizer) -- The tokenizer the pipeline uses to encode data for the model. This object inherits from PreTrainedTokenizer.
- modelcard (str or ModelCard, optional) -- Model card attributed to the model for this pipeline.
- framework (str, optional) -- The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be installed. If no framework is specified, it defaults to the one currently installed; if both frameworks are installed, it defaults to the framework of the model, or to PyTorch if no model is provided.
- task (str, defaults to "") -- A task identifier for the pipeline.
- num_workers (int, optional, defaults to 8) -- Number of DataLoader workers used when the pipeline streams a dataset on GPU with a PyTorch model.
- batch_size (int, optional, defaults to 1) -- Batch size used by the DataLoader in the same streaming case. Batching is not always beneficial for inference; see Batching with pipelines (https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) before raising it. A construction sketch using these arguments follows the class description below.
- args_parser (ArgumentHandler, optional) -- Reference to the object in charge of parsing supplied pipeline parameters.
- device (int, optional, defaults to -1) -- Device ordinal for CPU/GPU support. -1 runs the model on CPU; a non-negative value runs it on the CUDA device with that id. A native torch.device or a str can also be passed.
- binary_output (bool, optional, defaults to False) -- Flag indicating whether the output of the pipeline should be in a binary format (i.e. pickle) or as raw text.
- top_k (int, defaults to 5) -- The number of predictions to return.
- targets (str or List[str], optional) -- When passed, the model limits the scores to the given targets instead of looking up the whole vocabulary. If the provided targets are not in the model vocabulary, they are tokenized and the first resulting token is used (with a warning, which may be slower).

Masked language modeling prediction pipeline using any ModelWithLMHead. See the masked language modeling examples in the task summary for more information.

This mask filling pipeline can currently be loaded from pipeline() using the task identifier "fill-mask".

The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. See the up-to-date list of available models at https://huggingface.co/models?filter=fill-mask.

Note: this pipeline only works for inputs with exactly one token masked. Support for multiple masks is experimental: the returned values are raw model output and correspond to disjoint probabilities where one might expect joint probabilities (see https://github.com/huggingface/transformers/pull/10222).
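These constructor arguments are normally supplied through the pipeline() factory rather than by instantiating FillMaskPipeline directly. A minimal construction sketch, assuming the illustrative checkpoint "distilroberta-base" (the checkpoint, device, and batch size below are assumptions for the example, not pipeline defaults):

```python
from transformers import pipeline

fill_mask = pipeline(
    "fill-mask",
    model="distilroberta-base",  # any masked-LM checkpoint on the Hub works here
    device=-1,                   # -1 = CPU; a CUDA device id such as 0 selects that GPU
    batch_size=8,                # only pays off when streaming many inputs, e.g. a dataset
)
```

For one-off calls, batch_size and num_workers can simply be left at their defaults.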
__call__ (source: src/transformers/pipelines/fill_mask.py, line 205)

( inputs, *args, **kwargs ) -> a list or a list of lists of dict

Parameters

- args (str or List[str]) -- One or several texts (or one list of prompts) with masked tokens.
- targets (str or List[str], optional) -- When passed, the model limits the scores to the given targets instead of looking up the whole vocabulary. If the provided targets are not in the model vocabulary, they are tokenized and the first resulting token is used (with a warning, which may be slower).
- top_k (int, optional) -- When passed, overrides the number of predictions to return.

Returns: a list or a list of lists of dict. Each result comes as a list of dictionaries with the following keys:

- sequence (str) -- The corresponding input with the mask token prediction.
- score (float) -- The corresponding probability.
- token (int) -- The predicted token id (to replace the masked one).
- token_str (str) -- The predicted token (to replace the masked one).

Fill the masked token in the text(s) given as inputs.
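A short, hedged usage sketch of __call__; the checkpoint is illustrative, the mask token shown is the one used by RoBERTa-style tokenizers, and the printed scores are indicative only:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")  # illustrative checkpoint

# Each prediction is a dict with the keys documented above.
for pred in fill_mask("Paris is the <mask> of France.", top_k=2):
    print(pred["token_str"], round(pred["score"], 3), pred["sequence"])

# The mask token depends on the tokenizer ("[MASK]" for BERT-style models);
# fill_mask.tokenizer.mask_token returns the right one.

# `targets` restricts scoring to candidate words instead of the whole vocabulary.
fill_mask("Paris is the <mask> of France.", targets=["capital", "heart"])
```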
ImageClassificationPipeline

class transformers.ImageClassificationPipeline (source: src/transformers/pipelines/image_classification.py, line 32)

( *args, **kwargs )

Parameters

- model (PreTrainedModel or TFPreTrainedModel) -- The model the pipeline uses to make predictions. This needs to be a model inheriting from PreTrainedModel for PyTorch or TFPreTrainedModel for TensorFlow.
- tokenizer (PreTrainedTokenizer) -- The tokenizer the pipeline uses to encode data for the model. This object inherits from PreTrainedTokenizer.
- modelcard (str or ModelCard, optional) -- Model card attributed to the model for this pipeline.
- framework (str, optional) -- The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be installed. If no framework is specified, it defaults to the one currently installed; if both frameworks are installed, it defaults to the framework of the model, or to PyTorch if no model is provided.
- task (str, defaults to "") -- A task identifier for the pipeline.
- num_workers (int, optional, defaults to 8) -- Number of DataLoader workers used when the pipeline streams a dataset on GPU with a PyTorch model.
- batch_size (int, optional, defaults to 1) -- Batch size used by the DataLoader in the same streaming case. Batching is not always beneficial for inference; see Batching with pipelines (https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) before raising it.
- args_parser (ArgumentHandler, optional) -- Reference to the object in charge of parsing supplied pipeline parameters.
- device (int, optional, defaults to -1) -- Device ordinal for CPU/GPU support. -1 runs the model on CPU; a non-negative value runs it on the CUDA device with that id. A native torch.device or a str can also be passed.
- binary_output (bool, optional, defaults to False) -- Flag indicating whether the output of the pipeline should be in a binary format (i.e. pickle) or as raw text.

Image classification pipeline using any AutoModelForImageClassification. This pipeline predicts the class of an image.

This image classification pipeline can currently be loaded from pipeline() using the task identifier "image-classification".

See the list of available models at https://huggingface.co/models?filter=image-classification.
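A brief usage sketch before the __call__ reference below; the checkpoint and image URL are illustrative assumptions, and image inputs require the Pillow library:

```python
from transformers import pipeline

# Illustrative checkpoint; any image-classification model on the Hub can be used.
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

# Accepts an http(s) URL, a local file path, or a PIL.Image; top_k caps the labels returned.
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # example image URL
    top_k=3,
)
# Each prediction is a dict with "label" and "score" keys.
print(preds)
```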
__call__ (source: src/transformers/pipelines/image_classification.py, line 59)

( images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs )

Parameters

- images (str, List[str], PIL.Image or List[PIL.Image]) -- The pipeline handles three types of images: a string containing an http link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Assign labels to the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ImageSegmentationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageSegmentationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageSegmentationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" 
height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageSegmentationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageSegmentationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageSegmentationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_segmentation.py#L30" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will run the pipeline on CPU; a positive integer will run the model on the associated CUDA device id. You can pass a native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Image segmentation pipeline using any <code>AutoModelForXXXSegmentation</code>. 
This pipeline predicts masks of objects and their classes.</p> <p>This image segmentation pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;image-segmentation&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=image-segmentation" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageSegmentationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageSegmentationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageSegmentationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_segmentation.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
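</p> <p>For illustration only, a minimal usage sketch; the checkpoint name and image URL below are placeholders and are not taken from this documentation:</p> <pre><code class="language-python">from transformers import pipeline

# assumption: any image-segmentation checkpoint from the Hub works here
segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")

# each result is a dict with a label, an optional score and a PIL.Image binary mask
outputs = segmenter("https://example.com/street.png", threshold=0.9)
for segment in outputs:
    print(segment["label"], segment["score"])</code></pre> <p>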
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>semantic</code>) &#x2014; Segmentation task to be performed, choose [<code>semantic</code>, <code>instance</code> and <code>panoptic</code>] depending on model capabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; Probability threshold to filter out predicted masks.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.overlap_mask_area_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.overlap_mask_area_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overlap_mask_area_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Mask overlap threshold to eliminate small, disconnected segments.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Perform segmentation (detect masks &amp; classes) in the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ImageToTextPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageToTextPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageToTextPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">ImageToTextPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageToTextPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageToTextPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_to_text.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will run the pipeline on CPU; a positive integer will run the model on the associated CUDA device id. You can pass a native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Image To Text pipeline using an <code>AutoModelForVision2Seq</code>. 
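</p> <p>For illustration only, a minimal sketch of calling such a pipeline; the checkpoint name and image URL below are placeholders and are not taken from this documentation:</p> <pre><code class="language-python">from transformers import pipeline

# assumption: any image-to-text (captioning) checkpoint from the Hub works here
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")

# the result is a list of {"generated_text": ...} dicts
result = captioner("https://example.com/cat.png")
print(result[0]["generated_text"])</code></pre> <p>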
This pipeline predicts a caption for a given image.</p> <p>This image to text pipeline can currently be loaded from pipeline() using the following task identifier: “image-to-text”.</p> <p>See the list of available models on <a href="https://huggingface.co/models?pipeline_tag=image-to-text" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageToTextPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageToTextPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageToTextPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/image_to_text.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[str, typing.List[str], ForwardRef(&#39;Image.Image&#39;), 
typing.List[ForwardRef(&#39;Image.Image&#39;)]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of lists of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageToTextPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageToTextPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.ImageToTextPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of lists of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following key:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>) — The generated text.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Generate a caption for the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TokenClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NerPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L86" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = 
&lt;transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f0401585430&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.ignore_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.ignore_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.grouped_entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.grouped_entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. 
Whether or not to group the tokens corresponding to the same entity together in the predictions or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.aggregation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.aggregation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. 
Word entity will simply be the token with the maximum score.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Named Entity Recognition pipeline using any <code>ModelForTokenClassification</code>. See the <a href="../task_summary#named-entity-recognition">named entity recognition examples</a> for more information.</p> <p>This token recognition pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;ner&quot;</code> (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).</p> <p>The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=token-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.aggregate_words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>aggregate_words</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.aggregate_words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.aggregate_words"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L368" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Override tokens from a given word that disagree to force agreement on word boundaries.</p> <p>Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.gather_pre_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gather_pre_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.gather_pre_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.gather_pre_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L254" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sentence<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">offset_mapping<span class="opacity-60">: typing.Union[typing.List[typing.Tuple[int, int]], NoneType]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Fuse various numpy arrays into dicts with all the information needed for aggregation</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 
7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L430" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Find and group together the adjacent tokens with the same entity predicted.</p></div> <div class="docstring 
border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_sub_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_sub_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_sub_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_sub_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L395" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex 
space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_sub_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_sub_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Group together the adjacent tokens with the same entity predicted.</p></div></div> <p>See <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a> for all details.</p> <h3 class="relative group"><a id="transformers.ObjectDetectionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ObjectDetectionPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ObjectDetectionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ObjectDetectionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ObjectDetectionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ObjectDetectionPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/object_detection.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will use the CPU; a positive value will run the model on the associated CUDA device id. You can pass a native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Object detection pipeline using any <code>AutoModelForObjectDetection</code>. 
This pipeline predicts bounding boxes of objects and their classes.</p> <p>This object detection pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;object-detection&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=object-detection" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ObjectDetectionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ObjectDetectionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ObjectDetectionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/object_detection.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.__call__.threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.__call__.threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Detect objects (bounding boxes &amp; classes) in the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.QuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QuestionAnsweringPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 
7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">QuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L224" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will use the CPU; a positive value will run the model on the associated CUDA device id. You can pass a native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Question Answering pipeline using any <code>ModelForQuestionAnswering</code>. See the <a href="../task_summary#question-answering">question answering examples</a> for more information.</p> <p>This question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;question-answering&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a question answering task. 
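For instance, a minimal usage sketch (the question and context values here are purely illustrative): <code>from transformers import pipeline</code>, then <code>qa = pipeline(&quot;question-answering&quot;)</code> and <code>qa(question=&quot;Where does Sarah live?&quot;, context=&quot;Sarah lives in Berlin.&quot;)</code>, which returns a <code>dict</code> with <code>score</code>, <code>start</code>, <code>end</code> and <code>answer</code> keys. 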
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L330" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A <code>dict</code> or a list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>) &#x2014; One or several <code>SquadExample</code> containing the question and context.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.X" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.X"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>X</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>, <em>optional</em>) &#x2014; One or several <code>SquadExample</code> containing the question and context (will be treated the same way as if passed as the first positional argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.data" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>SquadExample</code> or a list of <code>SquadExample</code>, <em>optional</em>) &#x2014; One or several <code>SquadExample</code> containing the question and context (will be treated the same way as if passed as the first positional argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.question" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.question"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several question(s) (must be used in conjunction with the <code>context</code> argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.context" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.context"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several context(s) associated with the question(s) (must be used in conjunction with the <code>question</code> argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.topk" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.topk"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>topk</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.doc_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.doc_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. 
This argument controls the size of that overlap.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_answer_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_answer_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_seq_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_seq_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. 
The context will be split in several chunks (using <code>doc_stride</code> as overlap) if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_question_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_question_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. It will be truncated if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.align_to_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.align_to_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>align_to_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt on non-space-separated languages (like Japanese or Chinese).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.QuestionAnsweringPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A <code>dict</code> or a list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>score</strong> (<code>float</code>) — The probability associated with the answer.</li> <li><strong>start</strong> (<code>int</code>) — The character start index of the answer (in the tokenized version of the input).</li> <li><strong>end</strong> (<code>int</code>) — The character end index of the answer (in the tokenized version of the input).</li> <li><strong>answer</strong> (<code>str</code>) — The answer to the question.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Answer the question(s) given as inputs by using the context(s).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.create_sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
<h4>create_sample</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L265">&lt; source &gt;</a></p>
<p><code>( question: typing.Union[str, typing.List[str]], context: typing.Union[str, typing.List[str]] ) → One or a list of SquadExample</code></p>
<p>Parameters</p>
<ul>
<li><strong>question</strong> (<code>str</code> or <code>List[str]</code>) — The question(s) asked.</li>
<li><strong>context</strong> (<code>str</code> or <code>List[str]</code>) — The context(s) in which we will look for the answer.</li>
</ul>
<p>Returns: one or a list of <code>SquadExample</code> — the corresponding <code>SquadExample</code> grouping question and context.</p>
<p>QuestionAnsweringPipeline leverages the <code>SquadExample</code> internally. This helper method encapsulates all the logic for converting question(s) and context(s) to <code>SquadExample</code>.</p>
<p>We currently support extractive question answering.</p>
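<p>A short, hedged sketch of calling <code>create_sample</code> directly (in normal usage the pipeline builds these objects for you when you pass <code>question</code>/<code>context</code>); the strings are only example inputs:</p>
<pre>
from transformers import pipeline

question_answerer = pipeline("question-answering")

# build the SquadExample the pipeline works with internally
sample = question_answerer.create_sample(
    question="Where do I live?",
    context="My name is Wolfgang and I live in Berlin.",
)

# passing lists instead returns a list of SquadExample objects
samples = question_answerer.create_sample(
    question=["Where do I live?", "What is my name?"],
    context=["I live in Berlin.", "My name is Wolfgang."],
)
</pre>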
<h4>span_to_answer</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L606">&lt; source &gt;</a></p>
<p><code>( text: str, start: int, end: int ) → Dictionary like {'answer': str, 'start': int, 'end': int}</code></p>
<p>Parameters</p>
<ul>
<li><strong>text</strong> (<code>str</code>) — The actual context to extract the answer from.</li>
<li><strong>start</strong> (<code>int</code>) — The answer starting token index.</li>
<li><strong>end</strong> (<code>int</code>) — The answer end token index.</li>
</ul>
<p>Returns: a dictionary like <code>{'answer': str, 'start': int, 'end': int}</code>.</p>
<p>When decoding from token probabilities, this method maps token indexes to the actual words in the initial context.</p>
24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SummarizationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SummarizationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SummarizationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L196" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Summarize news articles and other documents.</p> <p>This summarizing pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;summarization&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, ’<em>bart-large-cnn</em>’, ’<em>t5-small</em>’, ’<em>t5-base</em>’, ’<em>t5-large</em>’, ’<em>t5-3b</em>’, ’<em>t5-11b</em>’. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=summarization" rel="nofollow">huggingface.co/models</a>.</p> <div class="relative group rounded-md"><a id="transformers.SummarizationPipeline.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># use bart in pytorch</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>) <span class="hljs-comment"># use t5 in tf</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>, model=<span class="hljs-string">&quot;t5-base&quot;</span>, tokenizer=<span class="hljs-string">&quot;t5-base&quot;</span>, framework=<span class="hljs-string">&quot;tf&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
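<p>The constructor parameters above are usually supplied through <code>pipeline()</code>. A minimal sketch, assuming a CUDA device is available and using <em>facebook/bart-large-cnn</em> purely as an example checkpoint:</p>
<pre>
from transformers import pipeline

# explicitly pick a model, run it on GPU 0 and batch inputs two at a time
summarizer = pipeline(
    "summarization",
    model="facebook/bart-large-cnn",
    device=0,       # -1 (the default) keeps the model on CPU
    batch_size=2,   # mostly useful when feeding many documents or a dataset
)
</pre>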
id="transformers.SummarizationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.SummarizationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SummarizationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L222" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.documents" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.documents"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>documents</strong> (<em>str</em> or <code>List[str]</code>) &#x2014; One or several articles (or one list of articles) to summarize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.SummarizationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>summary_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The summary of the corresponding input.</li> <li><strong>summary_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the summary.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Summarize the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TableQuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TableQuestionAnsweringPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TableQuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 
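<p>To make the return format above concrete, a minimal sketch of calling the pipeline and reading the result; the input text and length settings are only examples:</p>
<pre>
from transformers import pipeline

summarizer = pipeline("summarization")

outputs = summarizer(
    "An apple a day, keeps the doctor away",
    min_length=5,
    max_length=20,
    return_text=True,
)
# outputs is a list with one dict per input document
print(outputs[0]["summary_text"])
</pre>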
1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TableQuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TableQuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TableQuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/table_question_answering.py#L89" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.table_question_answering.TableQuestionAnsweringArgumentHandler object at 0x7f040156eb20&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Table Question Answering pipeline using a <code>ModelForTableQuestionAnswering</code>. This pipeline is only available in PyTorch.</p> <p>This tabular question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;table-question-answering&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=table-question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TableQuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TableQuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TableQuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/table_question_answering.py#L256" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> 
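Before the call reference below, here is a minimal usage sketch. The `"table-question-answering"` task identifier and the table/query input format come from this documentation; the checkpoint name is an assumption, and any fine-tuned table question answering model from the list above could be substituted.

```python
from transformers import pipeline

# Hypothetical checkpoint choice; pass any fine-tuned table QA model here.
table_qa = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")

# The table is a dict of columns (or a pandas DataFrame built from one).
table = {
    "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
    "age": ["56", "45", "59"],
}
result = table_qa(table=table, query="how old is leonardo di caprio?")
print(result["answer"])  # the answer string, prefixed by the aggregator if there is one
```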
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A dictionary or a list of dictionaries containing results</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.table" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.table"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>table</strong> (<code>pd.DataFrame</code> or <code>Dict</code>) &#x2014; Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. 
See above for an example of dictionary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.query" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.query"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>query</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Query or list of queries that will be sent to the model alongside the table.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.sequential" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.sequential"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequential</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to do inference sequentially or as a batch. 
Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, 
<code>str</code> or <code>TapasTruncationStrategy</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TableQuestionAnsweringPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A dictionary or a list of dictionaries containing results</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result is a dictionary with the following keys:</p> <ul> <li><strong>answer</strong> (<code>str</code>) — The answer of the query given the table. If there is an aggregator, the answer will be preceded by <code>AGGREGATOR &gt;</code>.</li> <li><strong>coordinates</strong> (<code>List[Tuple[int, int]]</code>) — Coordinates of the cells of the answers.</li> <li><strong>cells</strong> (<code>List[str]</code>) — List of strings made up of the answer cell values.</li> <li><strong>aggregator</strong> (<code>str</code>) — If the model has an aggregator, this returns the aggregator.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Answers queries according to a table. 
Answers queries according to a table. The pipeline accepts several types of inputs, which are detailed below:

- `pipeline(table, query)`
- `pipeline(table, [query])`
- `pipeline(table=table, query=query)`
- `pipeline(table=table, query=[query])`
- `pipeline({"table": table, "query": query})`
- `pipeline({"table": table, "query": [query]})`
- `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])`

The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table:

Example:

```python
data = {
    "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
    "age": ["56", "45", "59"],
    "number of movies": ["87", "53", "69"],
    "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
```

This dictionary can be passed in as such, or can be converted to a pandas DataFrame:

Example:

```python
import pandas as pd

table = pd.DataFrame.from_dict(data)
```
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TextClassificationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TextClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TextClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_classification.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.return_all_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.return_all_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return all prediction scores or just the one of the predicted class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.function_to_apply" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.function_to_apply"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <ul> <li><code>&quot;default&quot;</code>: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output.</li> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Text classification pipeline using any <code>ModelForSequenceClassification</code>. 
See the <a href="../task_summary#sequence-classification">sequence classification examples</a> for more information.</p> <p>This text classification pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;sentiment-analysis&quot;</code> (for classifying sequences according to positive or negative sentiments).</p> <p>If multiple classification labels are available (<code>model.config.num_labels &gt;= 2</code>), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=text-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TextClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_classification.py#L106" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code> or <code>Dict[str]</code>, or <code>List[Dict[str]]</code>) &#x2014; One or several texts to classify. 
In order to use text pairs for your classification, you can send a dictionnary containing <code>{&quot;text&quot;, &quot;text_pair&quot;}</code> keys, or a list of those.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to <code>1</code>) &#x2014; How many results to return.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.__call__.function_to_apply" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.function_to_apply"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. 
Accepts four different values:</p> <p>If this argument is not specified, then it will apply the following functions according to the number of labels:</p> <ul> <li>If the model has a single label, will apply the sigmoid function on the output.</li> <li>If the model has several labels, will apply the softmax function on the output.</li> </ul> <p>Possible values are:</p> <ul> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TextClassificationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) — The label predicted.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability.</li> </ul> <p>If <code>top_k</code> is used, one such dictionary is returned per label.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Classify the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TextGenerationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TextGenerationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextGenerationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
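The sketch below shows how these call arguments can be combined. The input sentences are made up for illustration, and text-pair inputs are most meaningful with checkpoints fine-tuned on sentence-pair tasks; only `top_k` and the `{"text", "text_pair"}` dictionary form come from the parameters documented above.

```python
from transformers import pipeline

classifier = pipeline("sentiment-analysis")

# Return the two highest-scoring labels instead of only the top one.
scores = classifier("This library is great.", top_k=2)

# Classify a text pair by passing a dict with "text" and "text_pair" keys.
pair_result = classifier({"text": "A man is playing guitar.", "text_pair": "Someone is making music."})
```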
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TextGenerationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TextGenerationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextGenerationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text_generation.py#L21" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
- **model** ([PreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel) or [TFPreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel)) — The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [PreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel) for PyTorch and [TFPreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel) for TensorFlow.
- **tokenizer** ([PreTrainedTokenizer](/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer)) — The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from [PreTrainedTokenizer](/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer).
- **modelcard** (`str` or `ModelCard`, *optional*) — Model card attributed to the model for this pipeline.
- **framework** (`str`, *optional*) — The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided.
- **task** (`str`, defaults to `""`) — A task identifier for the pipeline.
- **num_workers** (`int`, *optional*, defaults to 8) — When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the number of workers to be used.
- **batch_size** (`int`, *optional*, defaults to 1) — When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a PyTorch model), the size of the batch to use. For inference this is not always beneficial; please read [Batching with pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching).
- **args_parser** ([ArgumentHandler](/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler), *optional*) — Reference to the object in charge of parsing supplied pipeline parameters.
- **device** (`int`, *optional*, defaults to -1) — Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, a positive integer will run the model on the associated CUDA device id. You can pass a native `torch.device` or a `str` too.
- **binary_output** (`bool`, *optional*, defaults to `False`) — Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.

Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a specified text prompt.

This language generation pipeline can currently be loaded from [pipeline()](/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline) using the following task identifier: `"text-generation"`.

The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2).
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-generation).
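A minimal usage sketch, assuming whatever default checkpoint is resolved for the task; the prompt is illustrative and the generated continuation will vary. Only the `"text-generation"` task identifier and the `generated_text` output key come from the documentation in this section.

```python
from transformers import pipeline

# "text-generation" is the documented task identifier for this pipeline.
# Passing device=0 would place the model on the first CUDA device (see the `device` parameter above).
generator = pipeline("text-generation")

outputs = generator("In a shocking finding, scientists discovered")
print(outputs[0]["generated_text"])
```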
<span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several prompts (or one list of prompts) to complete.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TextGenerationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_full_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.return_full_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_full_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>False</code> only added text is returned, otherwise the full text is returned Only meaningful if <em>return_text</em> is set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; Prefix added to prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.handle_long_generation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.handle_long_generation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>handle_long_generation</strong> (<code>str</code>, <em>optional</em>) &#x2014; By default, this pipelines does not handle long generation (ones that exceed in one form or the other the model maximum length). There is no perfect way to adress this (more info :<a href="https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227" rel="nofollow">https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227</a>). 
This provides common strategies to work around that problem depending on your use case.</p> <ul> <li><code>None</code> : default strategy where nothing in particular happens</li> <li><code>&quot;hole&quot;</code>: Truncates left of input, and leaves a gap wide enough to let generation happen (might truncate a lot of the prompt and not suitable when generation exceed the model capacity)</li> </ul> <p>generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TextGenerationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the generated text.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Complete the prompt(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.Text2TextGenerationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Text2TextGenerationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
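A short sketch of how the call arguments combine with `generate_kwargs`. The prompt and the particular generation settings (`do_sample`, `max_new_tokens`, `num_return_sequences`) are illustrative assumptions forwarded to the model's generate method; only `return_full_text` and the `generated_text` key come from the parameters documented above.

```python
from transformers import pipeline

generator = pipeline("text-generation")

# return_full_text=False drops the prompt from the returned text;
# the remaining keyword arguments are forwarded to the model's generate method.
outputs = generator(
    "Once upon a time,",
    return_full_text=False,
    do_sample=True,
    max_new_tokens=30,
    num_return_sequences=2,
)
for out in outputs:
    print(out["generated_text"])
```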
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Text2TextGenerationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Text2TextGenerationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L26" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 
0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pipeline for text to text generation using seq2seq models.</p> <p>This Text2TextGenerationPipeline pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;text2text-generation&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=text2text-generation" rel="nofollow">huggingface.co/models</a>.</p> <div class="relative group rounded-md"><a id="transformers.Text2TextGenerationPipeline.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->text2text_generator = pipeline(<span class="hljs-string">&quot;text2text-generation&quot;</span>) text2text_generator(<span class="hljs-string">&quot;question: What is 42 ? 
context: 42 is the answer to life, the universe and everything&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.Text2TextGenerationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L119" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span 
class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Input text for the encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Text2TextGenerationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>TruncationStrategy</code>, <em>optional</em>, defaults to <code>TruncationStrategy.DO_NOT_TRUNCATE</code>) &#x2014; The truncation strategy for the tokenization within the pipeline. <code>TruncationStrategy.DO_NOT_TRUNCATE</code> (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model&#x2019;s max_length instead of throwing an error down the line. generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.Text2TextGenerationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the generated text.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Generate the output text(s) using text(s) given as inputs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline.check_inputs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>check_inputs</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.Text2TextGenerationPipeline.check_inputs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline.check_inputs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L92" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Checks whether there might be something wrong with given input with regard to the model.</p></div></div> <h3 class="relative group"><a id="transformers.TokenClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TokenClassificationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span 
class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L86" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f0401585430&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TokenClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.ignore_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.ignore_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.grouped_entities" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.grouped_entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.aggregation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.aggregation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. 
On word-based languages, we might end up splitting words undesirably: Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words; &#x201C;New York&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word-based models) Will use the <code>SIMPLE</code> strategy, except that words cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word-based models) Will use the <code>SIMPLE</code> strategy, except that words cannot end up with different tags. Scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word-based models) Will use the <code>SIMPLE</code> strategy, except that words cannot end up with different tags. The word entity will simply be the token with the maximum score.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Named Entity Recognition pipeline using any <code>ModelForTokenClassification</code>. See the <a href="../task_summary#named-entity-recognition">named entity recognition examples</a> for more information.</p> <p>This token recognition pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;ner&quot;</code> (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).</p> <p>The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
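</p> <p>As a minimal usage sketch (it assumes the default model that <code>pipeline()</code> selects for the <code>&quot;ner&quot;</code> task and uses the <code>aggregation_strategy</code> parameter documented above; the input sentence is purely illustrative), the pipeline could be used as follows:</p> <pre>from transformers import pipeline

# Load the token classification pipeline with entity grouping enabled
ner = pipeline(&quot;ner&quot;, aggregation_strategy=&quot;simple&quot;)

# With an aggregation strategy other than &quot;none&quot;, each result is expected to be a dict
# with keys such as &quot;entity_group&quot;, &quot;score&quot;, &quot;word&quot;, &quot;start&quot; and &quot;end&quot;
ner(&quot;My name is Wolfgang and I live in Berlin&quot;)</pre> <p>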
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=token-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L162" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) for token classification.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TokenClassificationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys:</p> <ul> <li><strong>word</strong> (<code>str</code>) — The token/word classified. This is obtained by decoding the selected tokens. If you want to have the exact string in the original sentence, use <code>start</code> and <code>stop</code>.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability for <code>entity</code>.</li> <li><strong>entity</strong> (<code>str</code>) — The entity predicted for that token/word (it is named <em>entity_group</em> when <em>aggregation_strategy</em> is not <code>"none"</code>.</li> <li><strong>index</strong> (<code>int</code>, only present when <code>aggregation_strategy="none"</code>) — The index of the corresponding token in the sentence.</li> <li><strong>start</strong> (<code>int</code>, <em>optional</em>) — The index of the start of the corresponding entity in the sentence. 
Only exists if the offsets are available within the tokenizer</li> <li><strong>end</strong> (<code>int</code>, <em>optional</em>) — The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Classify each token of the text(s) given as inputs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.aggregate_words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>aggregate_words</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.aggregate_words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.aggregate_words"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L368" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span 
class="opacity-60">: typing.List[dict]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Override tokens from a given word that disagree to force agreement on word boundaries.</p> <p>Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.gather_pre_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gather_pre_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.gather_pre_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.gather_pre_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L254" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sentence<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">offset_mapping<span class="opacity-60">: typing.Union[typing.List[typing.Tuple[int, int]], NoneType]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Fuse various numpy arrays into dicts with all the information needed for aggregation</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L430" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Find and group together the adjacent tokens with the same entity predicted.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_sub_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_sub_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_sub_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_sub_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/token_classification.py#L395" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_sub_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_sub_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Group together the adjacent tokens with the same entity predicted.</p></div></div> <h3 class="relative group"><a id="transformers.TranslationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TranslationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TranslationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TranslationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TranslationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TranslationPipeline"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L263" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Translates from one language to another.</p> <p>This translation pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;translation_xx_to_yy&quot;</code>.</p> <p>The models that this pipeline 
can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=translation" rel="nofollow">huggingface.co/models</a>.</p> <div class="relative group rounded-md"><a id="transformers.TranslationPipeline.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->en_fr_translator = pipeline(<span class="hljs-string">&quot;translation_en_to_fr&quot;</span>) en_fr_translator(<span class="hljs-string">&quot;How old are you?&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TranslationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 
26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TranslationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TranslationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/text2text_generation.py#L315" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Texts to be translated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.src_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.src_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the input. Might be required for multilingual models. 
Will not have any effect for single pair translation models.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.tgt_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.tgt_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the desired output. Might be required for multilingual models. Will not have any effect for single pair translation models. generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TranslationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>translation_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The translation.</li> <li><strong>translation_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the translation.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Translate the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.VisualQuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>VisualQuestionAnsweringPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisualQuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">VisualQuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.VisualQuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisualQuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/visual_question_answering.py#L19" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Visual Question Answering pipeline using a <code>AutoModelForVisualQuestionAnswering</code>. This pipeline is currently only available in PyTorch.</p> <p>This visual question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifiers: <code>&quot;visual-question-answering&quot;, &quot;vqa&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=visual-question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.VisualQuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.VisualQuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.VisualQuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/visual_question_answering.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60">: typing.Union[ForwardRef(&#39;Image.Image&#39;), str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">question<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.__call__.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.__call__.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
If given a single image, it can be broadcasted to multiple questions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.__call__.question" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.__call__.question"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>question</strong> (<code>str</code>, <code>List[str]</code>) &#x2014; The question(s) asked. If given a single question, it can be broadcasted to multiple images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.VisualQuestionAnsweringPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.VisualQuestionAnsweringPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.VisualQuestionAnsweringPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A dictionary or a list of dictionaries containing the result. 
The dictionaries contain the following keys</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li><strong>label</strong> (<code>str</code>) — The label identified by the model.</li> <li><strong>score</strong> (<code>int</code>) — The score attributed by the model for that label.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed below:</p> <ul><li><code>pipeline(image=image, question=question)</code></li> <li><code>pipeline({&quot;image&quot;: image, &quot;question&quot;: question})</code></li> <li><code>pipeline([{&quot;image&quot;: image, &quot;question&quot;: question}])</code></li> <li><code>pipeline([{&quot;image&quot;: image, &quot;question&quot;: question}, {&quot;image&quot;: image, &quot;question&quot;: question}])</code></li></ul></div></div> <h3 class="relative group"><a id="transformers.ZeroShotClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeroShotClassificationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">ZeroShotClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ZeroShotClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.zero_shot_classification.ZeroShotClassificationArgumentHandler object at 0x7f040158f040&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>NLI-based zero-shot classification pipeline using a <code>ModelForSequenceClassification</code> trained on NLI (natural language inference) tasks.</p> <p>Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for <em>entailment</em> is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the <em>entailment</em> label must be included in the model config’s :attr:<em>~transformers.PretrainedConfig.label2id</em>.</p> <p>This NLI pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;zero-shot-classification&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on an NLI task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?search=nli" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ZeroShotClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span 
class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A <code>dict</code> or a list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The sequence(s) to classify, will be truncated if the model input is too large.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.candidate_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.candidate_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>candidate_labels</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The set of possible class labels to classify each sequence into. 
Can be a single label, a string of comma-separated labels, or a list of labels.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This example is {}.&quot;</code>) &#x2014; The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, the default template is <code>&quot;This example is {}.&quot;</code> With the candidate label <code>&quot;sports&quot;</code>, this would be fed into the model like <code>&quot;&lt;cls&gt; sequence to classify &lt;sep&gt; This example is sports . &lt;sep&gt;&quot;</code>. The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.multi_label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.multi_label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>multi_label</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not multiple candidate labels can be true. 
If <code>False</code>, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If <code>True</code>, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.ZeroShotClassificationPipeline.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A <code>dict</code> or a list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) — The sequence for which this is the output.</li> <li><strong>labels</strong> (<code>List[str]</code>) — The labels sorted by order of likelihood.</li> <li><strong>scores</strong> (<code>List[float]</code>) — The probabilities for each of the labels.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Classify the sequence(s) given as inputs. See the <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.ZeroShotImageClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeroShotImageClassificationPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotImageClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
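To make these parameters concrete, here is a minimal usage sketch for zero-shot text classification. The checkpoint name is an illustrative assumption (any NLI-style model compatible with this pipeline should work), not something prescribed by this reference:

```python
from transformers import pipeline

# Assumed checkpoint: "facebook/bart-large-mnli" is a commonly used NLI model for this task.
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

result = classifier(
    "I have a problem with my iphone that needs to be resolved asap!",
    candidate_labels=["urgent", "not urgent", "phone", "computer"],
    hypothesis_template="This example is {}.",  # the documented default, shown explicitly
    multi_label=True,  # score each label independently (entailment vs. contradiction softmax)
)

# A single sequence yields one dict with "sequence", "labels" (sorted by likelihood) and "scores".
for label, score in zip(result["labels"], result["scores"]):
    print(f"{label}: {score:.3f}")
```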
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ZeroShotImageClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ZeroShotImageClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotImageClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_image_classification.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a 
href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 
8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Zero shot image classification pipeline using <code>CLIPModel</code>. 
This pipeline predicts the class of an image when you provide an image and a set of <code>candidate_labels</code>.</p> <p>This image classification pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;zero-shot-image-classification&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=zero-shot-image-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotImageClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ZeroShotImageClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotImageClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_image_classification.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[str, typing.List[str], ForwardRef(&#39;Image&#39;), typing.List[ForwardRef(&#39;Image&#39;)]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>candidate_labels</strong> (<code>List[str]</code>) &#x2014; The candidate labels for this image<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This is a photo of {}&quot;</code>) &#x2014; The sentence used in cunjunction with <em>candidate_labels</em> to attempt the image classification by replacing the placeholder with the candidate_labels. Then likelihood is estimated by using logits_per_image<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Assign labels to the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ZeroShotObjectDetectionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeroShotObjectDetectionPipeline </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotObjectDetectionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ZeroShotObjectDetectionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ZeroShotObjectDetectionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotObjectDetectionPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_object_detection.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 
0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native <code>torch.device</code> or a <code>str</code> too.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Zero shot object detection pipeline using <code>OwlViTForObjectDetection</code>. 
This pipeline predicts bounding boxes of objects when you provide an image and a set of <code>candidate_labels</code>.</p> <p>This object detection pipeline can currently be loaded from <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;zero-shot-object-detection&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=zero-shot-object-detection" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotObjectDetectionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ZeroShotObjectDetectionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotObjectDetectionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_object_detection.py#L52" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
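Before the `__call__` reference that follows, here is a minimal usage sketch. The OWL-ViT checkpoint, the example image URL, and the exact output keys (`score`, `label`, `box`) are assumptions for illustration rather than guarantees of this reference:

```python
from transformers import pipeline

# Assumed checkpoint: "google/owlvit-base-patch32" is a publicly available OWL-ViT model.
detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")

# A single image with its text queries. For several images, pass a list of images and a
# list of lists of queries (one nested list per image), as documented below.
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    text_queries=["cat", "remote control"],
    threshold=0.1,  # documented default; raise it to keep only confident boxes
)

# Assumed output shape: a list of {"score": float, "label": str, "box": dict} entries.
for detection in detections:
    print(detection)
```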
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[str, typing.List[str], ForwardRef(&#39;Image.Image&#39;), typing.List[ForwardRef(&#39;Image.Image&#39;)]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_queries<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an http url pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.text_queries" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.text_queries"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_queries</strong> (<code>str</code> or <code>List[str]</code> or <code>List[List[str]]</code>) &#x2014; Text queries to query the target image with.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.If" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.If"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>If</strong> given multiple images, <code>text_queries</code> should be provided as a list of lists, where each nested list &#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.contains" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.contains"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>contains</strong> the text queries for the corresponding image. 
&#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.1) &#x2014; The probability necessary to make a prediction.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotObjectDetectionPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotObjectDetectionPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top predictions that will be returned by the pipeline. 
If the provided number is <code>None</code> or higher than the number of predictions available, it will default to the number of predictions.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Detect objects (bounding boxes &amp; classes) in the image(s) passed as inputs.</p></div></div> <h2 class="relative group"><a id="transformers.Pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Parent class: <code>Pipeline</code></span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Pipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Pipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
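<p>A minimal usage sketch is shown below. The checkpoint name, the example image URL and the query strings are illustrative assumptions rather than part of the API; any zero-shot object detection checkpoint from the Hub can be substituted.</p>
<pre>from transformers import pipeline

# illustrative checkpoint; any zero-shot object detection model works the same way
detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")

predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    text_queries=["cat", "remote control"],
)
# each prediction should be a dict with "score", "label" and "box" entries
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])</pre>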
</div>
<h2 id="transformers.Pipeline">Parent class: <code>Pipeline</code></h2>
<div class="docstring">
  <h3>class transformers.Pipeline</h3>
  <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L722" target="_blank">&lt; source &gt;</a>
  <p><code>( model: typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')], tokenizer: typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None, feature_extractor: typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None, modelcard: typing.Optional[transformers.modelcard.ModelCard] = None, framework: typing.Optional[str] = None, task: str = '', args_parser: ArgumentHandler = None, device: typing.Union[int, str, ForwardRef('torch.device')] = -1, binary_output: bool = False, **kwargs )</code></p>
  <p><strong>Parameters</strong></p>
  <ul>
    <li><strong>model</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.</li>
    <li><strong>tokenizer</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.</li>
    <li><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code>, <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.</li>
    <li><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.</li>
    <li><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.</li>
    <li><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.</li>
    <li><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use. For inference this is not always beneficial; please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a>.</li>
    <li><strong>args_parser</strong> (<a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.</li>
    <li><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU; a positive integer will run the model on the associated CUDA device id. You can also pass a native <code>torch.device</code> or a <code>str</code>.</li>
    <li><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating whether the output of the pipeline should be in a binary format (i.e., pickle) or as raw text.</li>
  </ul>
  <p>The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.</p>
  <p>Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations:</p>
  <p>Input -&gt; Tokenization -&gt; Model Inference -&gt; Post-Processing (task dependent) -&gt; Output</p>
  <p>Pipeline supports running on CPU or GPU through the device argument (see below).</p>
  <p>Some pipelines, like for instance <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a> (<code>&#39;feature-extraction&#39;</code>), output large tensor objects as nested lists. In order to avoid dumping such large structures as textual data we provide the <code>binary_output</code> constructor argument. If set to <code>True</code>, the output will be stored in the pickle format.</p>
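  <p>As a rough sketch of how these arguments are typically supplied (the checkpoint name below is an illustrative assumption; any task-appropriate model can be used), the <code>device</code> and <code>batch_size</code> arguments are usually passed through <code>pipeline()</code> rather than to <code>Pipeline</code> directly:</p>
  <pre>from transformers import pipeline

# illustrative checkpoint; device=0 selects the first CUDA device, device=-1 (the default) stays on CPU
pipe = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",
    device=0,
    batch_size=8,  # batching mostly helps when streaming many inputs, e.g. a dataset
)

print(pipe(["I love this library.", "This is terrible."]))</pre>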
  <div class="docstring">
    <h4 id="transformers.Pipeline.check_model_type">check_model_type</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L907" target="_blank">&lt; source &gt;</a>
    <p><code>( supported_models: typing.Union[typing.List[str], dict] )</code></p>
    <p><strong>Parameters</strong></p>
    <ul>
      <li><strong>supported_models</strong> (<code>List[str]</code> or <code>dict</code>) &#x2014; The list of models supported by the pipeline, or a dictionary with model class values.</li>
    </ul>
    <p>Check if the model class is supported by the pipeline.</p>
  </div>
  <div class="docstring">
    <h4 id="transformers.Pipeline.device_placement">device_placement</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L847" target="_blank">&lt; source &gt;</a>
    <p><code>( )</code></p>
    <p>Context Manager allowing tensor allocation on the user-specified device in a framework agnostic way.</p>
    <p>Examples:</p>
    <pre># Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
    # Every framework specific tensor allocation will be done on the requested device
    output = pipe(...)</pre>
  </div>
  <div class="docstring">
    <h4 id="transformers.Pipeline.ensure_tensor_on_device">ensure_tensor_on_device</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L873" target="_blank">&lt; source &gt;</a>
    <p><code>( **inputs ) &#x2192; Dict[str, torch.Tensor]</code></p>
    <p><strong>Parameters</strong></p>
    <ul>
      <li><strong>inputs</strong> (keyword arguments that should be <code>torch.Tensor</code>, the rest is ignored) &#x2014; The tensors to place on <code>self.device</code>. Recursive on lists <strong>only</strong>.</li>
    </ul>
    <p><strong>Returns</strong>: <code>Dict[str, torch.Tensor]</code> &#x2014; The same as <code>inputs</code> but on the proper device.</p>
    <p>Ensure PyTorch tensors are on the specified device.</p>
  </div>
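  <p>A small sketch of what this helper does in practice; the checkpoint name and the hand-built tensors are illustrative assumptions, and in real code the method is normally called from inside a pipeline&#x2019;s <code>_forward</code>:</p>
  <pre>import torch
from transformers import pipeline

# illustrative checkpoint; any PyTorch pipeline instance exposes ensure_tensor_on_device
pipe = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")

batch = {
    "input_ids": torch.tensor([[101, 2023, 2003, 2307, 102]]),
    "attention_mask": torch.ones(1, 5, dtype=torch.long),
    "metadata": "kept as-is",  # non-tensor values are returned unchanged
}
on_device = pipe.ensure_tensor_on_device(**batch)
# tensors are now on pipe.device; other values pass through untouched
print({k: getattr(v, "device", "not a tensor") for k, v in on_device.items()})</pre>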
  <div class="docstring">
    <h4 id="transformers.Pipeline.postprocess">postprocess</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L964" target="_blank">&lt; source &gt;</a>
    <p><code>( model_outputs: ModelOutput, **postprocess_parameters: typing.Dict )</code></p>
    <p>Postprocess will receive the raw outputs of the <code>_forward</code> method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict of results (containing just strings and numbers).</p>
  </div>
  <div class="docstring">
    <h4 id="transformers.Pipeline.predict">predict</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L841" target="_blank">&lt; source &gt;</a>
    <p><code>( X )</code></p>
    <p>Scikit / Keras interface to transformers&#x2019; pipelines. This method will forward to <code>__call__()</code>.</p>
  </div>
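  <p>A quick sketch of the scikit-learn style alias (the checkpoint name is an illustrative assumption):</p>
  <pre>from transformers import pipeline

# illustrative checkpoint
pipe = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")

# predict(X) simply forwards to the pipeline's __call__, so both lines are equivalent
print(pipe.predict(["I love this library."]))
print(pipe(["I love this library."]))</pre>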
  <div class="docstring">
    <h4 id="transformers.Pipeline.preprocess">preprocess</h4>
    <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L943" target="_blank">&lt; source &gt;</a>
    <p><code>( input_: typing.Any, **preprocess_parameters: typing.Dict )</code></p>
    <p>Preprocess will take the <code>input_</code> of a specific pipeline and return a dictionary of everything necessary for <code>_forward</code> to run properly. It should contain at least one tensor, but might have arbitrary other items.</p>
  </div>
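  <p>To make the <code>preprocess</code> / <code>_forward</code> / <code>postprocess</code> split concrete, here is a minimal custom pipeline sketch. The class name and the returned dictionary layout are illustrative assumptions, not part of the documented API; only the overridden methods are required by <code>Pipeline</code>, and the sketch assumes a PyTorch sequence classification model.</p>
  <pre>from transformers import Pipeline

class ToyTextScoringPipeline(Pipeline):
    """Illustrative pipeline: returns the best label and its score for a sentence."""

    def _sanitize_parameters(self, **kwargs):
        # route incoming kwargs to preprocess / _forward / postprocess (none used here)
        return {}, {}, {}

    def preprocess(self, inputs, **preprocess_parameters):
        # must return a dict with at least one tensor, ready for _forward
        return self.tokenizer(inputs, return_tensors=self.framework)

    def _forward(self, model_inputs, **forward_parameters):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, **postprocess_parameters):
        # turn raw logits into a small, human-friendly dict of strings and numbers
        scores = model_outputs.logits.softmax(-1)[0]
        best = int(scores.argmax())
        return {"label": self.model.config.id2label[best], "score": float(scores[best])}</pre>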
#### transformers.Pipeline.save_pretrained

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L790)

( save_directory: str )

Parameters:

- **save_directory** (`str`) — A path to the directory where to save the pipeline. It will be created if it doesn't exist.
Save the pipeline's model and tokenizer.

#### transformers.Pipeline.transform

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L835)

( X )
Scikit / Keras interface to transformers' pipelines. This method will forward to `__call__()`.
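A minimal usage sketch tying these entry points together; the checkpoint name and output directory are just examples:

```python
from transformers import pipeline

# Any standard task works here; this checkpoint is only an example.
pipe = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

preds = pipe.transform(["I love this movie!"])    # scikit-style, forwards to __call__()
same = pipe.predict(["I love this movie!"])       # likewise forwards to __call__()

pipe.save_pretrained("./my_sentiment_pipeline")   # writes the model and tokenizer to this directory
```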
47
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/model.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedModel&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;large-model-loading&quot;,&quot;title&quot;:&quot;Large model loading&quot;},{&quot;local&quot;:&quot;model-instantiation-dtype&quot;,&quot;title&quot;:&quot;Model Instantiation dtype&quot;}],&quot;title&quot;:&quot;PreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.modeling_utils.ModuleUtilsMixin&quot;,&quot;title&quot;:&quot;ModuleUtilsMixin&quot;},{&quot;local&quot;:&quot;transformers.TFPreTrainedModel&quot;,&quot;title&quot;:&quot;TFPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFModelUtilsMixin&quot;,&quot;title&quot;:&quot;TFModelUtilsMixin&quot;},{&quot;local&quot;:&quot;transformers.FlaxPreTrainedModel&quot;,&quot;title&quot;:&quot;FlaxPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.utils.PushToHubMixin&quot;,&quot;title&quot;:&quot;Pushing to the Hub&quot;},{&quot;local&quot;:&quot;transformers.modeling_utils.load_sharded_checkpoint&quot;,&quot;title&quot;:&quot;Sharded checkpoints&quot;}],&quot;title&quot;:&quot;Models&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/model.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Models </span></h1> <p>The base classes <a 
href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>, <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a> implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace’s AWS S3 repository).</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> also implement a few methods which are common among all the models to:</p> <ul><li>resize the input token embeddings when new tokens are added to the vocabulary</li> <li>prune the attention heads of the model.</li></ul> <p>The other methods that are common to each model are defined in <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin">ModuleUtilsMixin</a> (for the PyTorch models) and <code>~modeling_tf_utils.TFModuleUtilsMixin</code> (for the TensorFlow models) or for text generation, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin">GenerationMixin</a> (for the PyTorch models), <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin">TFGenerationMixin</a> (for the TensorFlow models) and <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin">FlaxGenerationMixin</a> (for the Flax/JAX models).</p> <h2 class="relative group"><a id="transformers.PreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedModel </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L906" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all models.</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:</p> <ul><li>resize the input embeddings,</li> <li>prune heads in the self-attention heads.</li></ul> <p>Class attributes (overridden by derived classes):</p> <ul><li><p><strong>config_class</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use 
Class attributes (overridden by derived classes):

- **config_class** (PretrainedConfig) — A subclass of PretrainedConfig to use as configuration class for this model architecture.
- **load_tf_weights** (`Callable`) — A python *method* for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
  - **model** (PreTrainedModel) — An instance of the model on which to load the TensorFlow checkpoint.
  - **config** (`PreTrainedConfig`) — An instance of the configuration associated to the model.
  - **path** (`str`) — A path to the TensorFlow checkpoint.
- **base_model_prefix** (`str`) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
- **is_parallelizable** (`bool`) — A flag indicating whether this model supports model parallelization.
- **main_input_name** (`str`) — The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models).
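A minimal sketch of a derived class setting some of these attributes; `MyConfig` and `MyModel` are hypothetical names, not classes from the library:

```python
from transformers import PreTrainedModel, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyModel(PreTrainedModel):
    # Class attributes overridden by this derived class:
    config_class = MyConfig          # configuration class for this architecture
    base_model_prefix = "my_model"   # attribute holding the base model in head classes
    main_input_name = "input_ids"    # principal input of the model

    def __init__(self, config):
        super().__init__(config)
```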
#### transformers.PreTrainedModel.push_to_hub

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712)

( repo_id: str, use_temp_dir: typing.Optional[bool] = None, commit_message: typing.Optional[str] = None, private: typing.Optional[bool] = None, use_auth_token: typing.Union[bool, str, NoneType] = None, max_shard_size: typing.Union[int, str, NoneType] = '10GB', create_pr: bool = False, **deprecated_kwargs )

Parameters:

- **repo_id** (`str`) — The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization.
- **use_temp_dir** (`bool`, *optional*) — Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise.
- **commit_message** (`str`, *optional*) — Message to commit while pushing. Will default to `"Upload model"`.
- **private** (`bool`, *optional*) — Whether or not the repository created should be private (requires a paying subscription).
- **use_auth_token** (`bool` or `str`, *optional*) — The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified.
- **max_shard_size** (`int` or `str`, *optional*, defaults to `"10GB"`) — Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size lower than this limit. If expressed as a string, it needs to be digits followed by a unit (like `"5MB"`).
- **create_pr** (`bool`, *optional*, defaults to `False`) — Whether or not to create a PR with the uploaded files or directly commit.

Upload the model file to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`.
with-hover:right-full" href="#transformers.PreTrainedModel.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel model = AutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 
#### transformers.PreTrainedModel.from_pretrained

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1661)

( pretrained_model_name_or_path: typing.Union[str, os.PathLike, NoneType], *model_args, **kwargs )

Parameters:
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> <li>A path or url to a model folder containing a <em>flax checkpoint file</em> in <em>.msgpack</em> format (e.g, <code>./flax_model/</code> containing <code>flax_model.msgpack</code>). In this case, <code>from_flax</code> should be set to <code>True</code>.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. 
  Configuration can be automatically loaded when:
  - The model is a model provided by the library (loaded with the *model id* string of a pretrained model).
  - The model was saved using save_pretrained() and is reloaded by supplying the save directory.
  - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory.
- **state_dict** (`Dict[str, torch.Tensor]`, *optional*) — A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using save_pretrained() and from_pretrained() is not a simpler option.
- **cache_dir** (`Union[str, os.PathLike]`, *optional*) — Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.
- **from_tf** (`bool`, *optional*, defaults to `False`) — Load the model weights from a TensorFlow checkpoint save file (see docstring of `pretrained_model_name_or_path` argument).
- **from_flax** (`bool`, *optional*, defaults to `False`) — Load the model weights from a Flax checkpoint save file (see docstring of `pretrained_model_name_or_path` argument).
- **ignore_mismatched_sizes** (`bool`, *optional*, defaults to `False`) — Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).
- **force_download** (`bool`, *optional*, defaults to `False`) — Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.
- **resume_download** (`bool`, *optional*, defaults to `False`) — Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.
Will attempt to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.output_loading_info(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.output_loading_info(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.local_files_only(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.local_files_only(bool,"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_files_only(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (i.e., do not try to download the model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.mirror" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.mirror"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014; Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. 
Please refer to the mirror site for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained._fast_init(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained._fast_init(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>_fast_init(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to disable fast initialization.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>One should only disable <em>_fast_init</em> to ensure backwards compatibility with <code>transformers.__version__ &lt; 4.6.0</code> for seeded model initialization. This argument will be removed at the next major version. 
See <a href="https://github.com/huggingface/transformers/pull/11471" rel="nofollow">pull request 11471</a> for more information.</p> </div><!-- HTML_TAG_END --> </span></span> </li></ul> <p class="flex items-center font-semibold">Parameters for big model inference <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.low_cpu_mem_usage(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.low_cpu_mem_usage(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>low_cpu_mem_usage(<code>bool</code>,</strong> <em>optional</em>) &#x2014; Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. This is an experimental feature and a subject to change at any moment.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.torch_dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.torch_dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014; Override the default <code>torch.dtype</code> and load the model under this dtype. 
If <code>&quot;auto&quot;</code> is passed the dtype will be automatically derived from the model&#x2019;s weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.device_map" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.device_map"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]]</code>, <em>optional</em>) &#x2014; A map that specifies where each submodule should go. It doesn&#x2019;t need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device.</p> <p>To have Accelerate compute the most optimized <code>device_map</code> automatically, set <code>device_map=&quot;auto&quot;</code>. For more information about each option see <a href="https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map" rel="nofollow">designing a device map</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.max_memory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.max_memory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) &#x2014; A dictionary device identifier to maximum memory. 
Will default to the maximum memory available for each GPU and the available CPU RAM if unset.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.offload_folder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.offload_folder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; If the <code>device_map</code> contains any value <code>&quot;disk&quot;</code>, the folder where we will offload weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.offload_state_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.offload_state_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014; If <code>True</code>, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. 
Defaults to <code>True</code> when there is some disk offload.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.load_in_8bit" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.load_in_8bit"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_in_8bit</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, will convert the loaded model into mixed-8bit quantized model. To use this feature please install <code>bitsandbytes</code> compiled with your CUDA version by running <code>pip install -i https://test.pypi.org/simple/ bitsandbytes-cudaXXX</code> where XXX is your CUDA version (e.g. 11.6 = 116). Make also sure that you have enough GPU RAM to store half of the model size since the 8bit modules are not compiled and adapted for CPUs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.load_in_8bit_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.load_in_8bit_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_in_8bit_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 6) &#x2014; Works together with <code>load_in_8bit</code>. This corresponds to the outlier threshold for outlier detection as described in <code>GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale</code> paper. 
Any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.load_in_8bit_skip_modules" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.load_in_8bit_skip_modules"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_in_8bit_skip_modules</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; An explicit list of the modules that we do not want to convert in 8-bit. 
This is useful for models such as Jukebox that have several heads in different places and not necessarily at the last position.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g., <code>output_attentions=True</code>). 
Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>). Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li> </ul> </div></div> <p>Instantiate a pretrained PyTorch model from a pre-trained model configuration.</p> <p>The model is set in evaluation mode by default using <code>model.eval()</code> (Dropout modules are deactivated). To train the model, you should first set it back in training mode with <code>model.train()</code>.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Activate the special <a href="https://huggingface.co/transformers/installation.html#offline-mode" rel="nofollow">“offline-mode”</a> to use this method in a firewalled environment.</p></div> <div class="relative group rounded-md"><a id="transformers.PreTrainedModel.from_pretrained.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, BertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./tf_model/my_tf_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./tf_model/my_tf_checkpoint.ckpt.index&quot;</span>, from_tf=<span class="hljs-literal">True</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Flax checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, from_flax=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div> <ul><li><code>low_cpu_mem_usage</code> algorithm:</li></ul> <p>This is an 
experimental function that loads the model using ~1x model size CPU memory</p> <p>Here is how it works:</p> <ol><li>save which state_dict keys we have</li> <li>drop state_dict before the model is created, since the latter takes 1x model size CPU memory</li> <li>after the model has been instantiated switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict</li> <li>load state_dict 2nd time</li> <li>replace the params/buffers from the state_dict</li></ol> <p>Currently, it can’t handle deepspeed ZeRO stage 3 and ignores loading errors</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.get_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.get_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.get_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1061" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>nn.Module</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PreTrainedModel.get_input_embeddings.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>nn.Module</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A torch module mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the model’s input embeddings.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.get_memory_footprint"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_memory_footprint</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.get_memory_footprint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.get_memory_footprint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1643" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_buffers<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.get_memory_footprint.return_buffers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.get_memory_footprint.return_buffers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_buffers</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch norm layers. Please see: <a href="https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2" rel="nofollow">https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2</a><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. Useful to benchmark the memory footprint of the current model and design some tests. 
Solution inspired from the PyTorch discussions: <a href="https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2" rel="nofollow">https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2</a></p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.get_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.get_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.get_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1087" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>nn.Module</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PreTrainedModel.get_output_embeddings.returns" class="flex 
items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>nn.Module</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A torch module mapping hidden states to vocabulary.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the model’s output embeddings.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.gradient_checkpointing_disable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gradient_checkpointing_disable</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.gradient_checkpointing_disable" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.gradient_checkpointing_disable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1471" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 
relative docstring-details "> </div></div> <p>Deactivates gradient checkpointing for the current model.</p> <p>Note that in other frameworks this feature can be referred to as “activation checkpointing” or “checkpoint activations”.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.gradient_checkpointing_enable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gradient_checkpointing_enable</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.gradient_checkpointing_enable" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.gradient_checkpointing_enable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1460" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Activates gradient checkpointing for the current model.</p> <p>Note that in other frameworks this feature can be referred to as “activation checkpointing” or “checkpoint 
#### init_weights

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1427)

`init_weights()`

If needed, prunes and maybe initializes weights.
#### post_init

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L980)

`post_init()`

A method executed at the end of each Transformer model initialization, to execute code that needs the model's modules properly initialized (such as weight initialization).
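As an illustration, a custom model built on `PreTrainedModel` would typically call `post_init()` as the last step of its constructor. The class and config below are hypothetical and not part of the library:

```py
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny"  # hypothetical model type

class TinyModel(PreTrainedModel):
    config_class = TinyConfig

    def __init__(self, config):
        super().__init__(config)
        self.dense = nn.Linear(16, 16)
        # run weight initialization and any final processing once all submodules exist
        self.post_init()

    def forward(self, hidden_states):
        return self.dense(hidden_states)

model = TinyModel(TinyConfig())
```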
#### prune_heads

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1443)

`prune_heads(heads_to_prune: typing.Dict[int, typing.List[int]])`

Parameters:

- **heads_to_prune** (`Dict[int, List[int]]`) — Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.

Prunes heads of the base model.
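A short sketch mirroring the dictionary from the parameter description above. Not every architecture implements head pruning, so the example uses a BERT checkpoint, which does:

```py
from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")

# prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2 of the base model
model.prune_heads({1: [0, 2], 2: [2, 3]})
```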
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2680" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.resize_token_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>resize_token_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.resize_token_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.resize_token_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
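A hedged sketch of registering a custom model class; the class names are hypothetical and only serve to illustrate the call:

```py
from transformers import PreTrainedModel, PretrainedConfig

class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom-model"  # hypothetical

class MyCustomModel(PreTrainedModel):
    config_class = MyCustomConfig

# make the Auto API aware of the custom class, so checkpoints shipping this code
# can later be resolved through AutoModel
MyCustomModel.register_for_auto_class("AutoModel")
```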
#### resize_token_embeddings

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1218)

`resize_token_embeddings(new_num_tokens: typing.Optional[int] = None) → torch.nn.Embedding`

Parameters:

- **new_num_tokens** (`int`, *optional*) — The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.

Returns: `torch.nn.Embedding` — Pointer to the input tokens Embeddings Module of the model.

Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.

Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
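A typical use is growing the embedding matrix after adding tokens to the tokenizer; a minimal sketch with the Marian checkpoint used elsewhere on this page (the added tokens are arbitrary):

```py
from transformers import MarianMTModel, MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

num_added = tokenizer.add_tokens(["<new_token_1>", "<new_token_2>"])
embeddings = model.resize_token_embeddings(len(tokenizer))  # returns the resized nn.Embedding
print(num_added, embeddings.num_embeddings)
```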
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1491" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_main_process<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state_dict<span class="opacity-60">: typing.Optional[dict] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_function<span class="opacity-60">: typing.Callable = &lt;function save at 0x7f045fb23ca0&gt;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">safe_serialization<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.is_main_process" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.is_main_process"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main process to avoid race conditions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.state_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.state_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>state_dict</strong> (nested dictionary of <code>torch.Tensor</code>) &#x2014; The state dictionary of the model to save. 
Will default to <code>self.state_dict()</code>, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.save_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.save_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_function</strong> (<code>Callable</code>) &#x2014; The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace <code>torch.save</code> by another method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. 
You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If a single weight of the model is bigger than <code>max_shard_size</code>, it will be in its own checkpoint shard which will be bigger than <code>max_shard_size</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.safe_serialization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.safe_serialization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to 
<code>False</code>) &#x2014; Whether to save the model using <code>safetensors</code> or the traditional PyTorch way (that uses <code>pickle</code>).</p> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a model and its configuration file to a directory, so that it can be re-loaded using the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">from_pretrained()</a> class method.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.set_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.set_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.set_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1074" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60">: Module</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.set_input_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.set_input_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>nn.Module</code>) &#x2014; A module mapping vocabulary to hidden states.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s input embeddings.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.tie_weights"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
#### tie_weights

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L1102)

`tie_weights()`

Tie the weights between the input embeddings and the output embeddings.

If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead.

### Large model loading

In Transformers 4.20.0, the from_pretrained() method has been reworked to accommodate large models using [Accelerate](https://huggingface.co/docs/accelerate/big_modeling). This requires Accelerate >= 0.9.0 and PyTorch >= 1.9.0.
Instead of creating the full model and then loading the pretrained weights inside it (which takes twice the size of the model in RAM, one for the randomly initialized model, one for the weights), there is an option to create the model as an empty shell and only materialize its parameters when the pretrained weights are loaded.

This option can be activated with `low_cpu_mem_usage=True`. The model is first created on the Meta device (with empty weights) and the state dict is then loaded inside it (shard by shard in the case of a sharded checkpoint). This way the maximum RAM used is the full size of the model only.

```py
from transformers import AutoModelForSeq2SeqLM

t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", low_cpu_mem_usage=True)
```

Moreover, you can directly place the model on different devices if it doesn't fully fit in RAM (this only works for inference for now). With `device_map="auto"`, Accelerate will determine where to put each layer to maximize the use of your fastest devices (GPUs) and offload the rest on the CPU, or even the hard drive if you don't have enough GPU RAM (or CPU RAM).
Even if the model is split across several devices, it will run as you would normally expect.

When passing a `device_map`, `low_cpu_mem_usage` is automatically set to `True`, so you don't need to specify it:

```py
from transformers import AutoModelForSeq2SeqLM

t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")
```

You can inspect how the model was split across devices by looking at its `hf_device_map` attribute:

```py
t0pp.hf_device_map
```
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->{<span class="hljs-string">&#x27;shared&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.embed_tokens&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;encoder&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.block.0&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;decoder.block.1&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.2&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.3&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.4&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.5&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.6&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.7&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.8&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.9&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.10&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.11&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.12&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.13&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.14&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.15&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.16&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.17&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.18&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.19&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.20&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.21&#x27;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;decoder.block.22&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.block.23&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.final_layer_norm&#x27;</span>: <span 
class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;decoder.dropout&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>, <span class="hljs-string">&#x27;lm_head&#x27;</span>: <span class="hljs-string">&#x27;cpu&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>You can also write your own device map following the same format (a dictionary layer name to device). It should map all parameters of the model to a given device, but you don’t have to detail where all the submosules of one layer go if that layer is entirely on the same device. For instance, the following device map would work properly for T0pp (as long as you have the GPU memory):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->device_map = {<span class="hljs-string">&quot;shared&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;encoder&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;decoder&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;lm_head&quot;</span>: <span class="hljs-number">1</span>}<!-- HTML_TAG_END --></pre></div> <p>Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like <code>torch.float16</code>) or use direct quantization techniques as described below.</p> <h3 class="relative group"><a id="model-instantiation-dtype" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#model-instantiation-dtype"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Model Instantiation 
Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like `torch.float16`) or use direct quantization techniques as described below.

### Model Instantiation dtype

Under PyTorch, a model normally gets instantiated in `torch.float32` format. This can be an issue if you try to load a model whose weights are stored in fp16, since it would then require twice as much memory. To overcome this limitation, you can either explicitly pass the desired `dtype` using the `torch_dtype` argument:

```python
model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16)
```

or, if you want the model to always load in the most optimal memory pattern, you can use the special value `"auto"`, and then `dtype` will be automatically derived from the model's weights:

```python
model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto")
```

Models instantiated from scratch can also be told which `dtype` to use with:

```python
config = T5Config.from_pretrained("t5")
model = AutoModel.from_config(config)
```
Due to PyTorch design, this functionality is only available for floating dtypes.
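As a quick sanity check, you can confirm which dtype the weights actually ended up in. This is a minimal sketch using the small `t5-small` checkpoint as an example:

```python
import torch
from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16)
print(next(model.parameters()).dtype)  # expected: torch.float16
```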
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">ModuleUtilsMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L623" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A few utilities for <code>torch.nn.Modules</code>, to be used as a mixin.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_memory_hooks</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L654" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.</p> <p>Increase in memory consumption is stored in a <code>mem_rss_diff</code> attribute for each module and can be reset to zero with <code>model.reset_memory_hooks_state()</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>estimate_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L858" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_dict<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>dict</code>) &#x2014; The model inputs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The total number of tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Helper function to estimate the total number of tokens from the model inputs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>floating_point_ops</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L879" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_dict<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exclude_embeddings<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>) &#x2014; The batch size for the forward pass.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_length</strong> (<code>int</code>) &#x2014; The number of tokens in each line of the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to count embedding and softmax operations.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of floating-point operations.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if <code>12 * d_model &lt;&lt; sequence_length</code>) as laid out in <a href="https://arxiv.org/pdf/2001.08361.pdf" rel="nofollow">this paper</a> section 2.1. Should be overridden for transformers with parameter re-use e.g. 
#### get_extended_attention_mask

`(attention_mask: Tensor, input_shape: typing.Tuple[int], device = None, dtype = None)` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L742)

**Parameters**

- **attention_mask** (`torch.Tensor`) — Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
- **input_shape** (`Tuple[int]`) — The shape of the input to the model.

Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
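Continuing the same sketch, this is roughly what the broadcastable mask looks like for an encoder-style model (the exact negative fill value depends on the model's dtype):

```python
attention_mask = batch["attention_mask"]                                   # shape (batch_size, seq_len)
extended = model.get_extended_attention_mask(attention_mask, attention_mask.shape)
print(extended.shape)  # typically (batch_size, 1, 1, seq_len) for a non-decoder model
# positions to attend to become 0.0, padded positions a large negative value
```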
#### get_head_mask

`(head_mask: typing.Optional[torch.Tensor], num_hidden_layers: int, is_attention_chunked: bool = False)` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L794)

**Parameters**

- **head_mask** (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*) — The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
- **num_hidden_layers** (`int`) — The number of hidden layers in the model.
- **is_attention_chunked** (`bool`, *optional*, defaults to `False`) — Whether or not the attention scores are computed by chunks.

Prepare the head mask if needed.
#### invert_attention_mask

`(encoder_attention_mask: Tensor) → torch.Tensor` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L690)

**Parameters**

- **encoder_attention_mask** (`torch.Tensor`) — An attention mask.

**Returns**: `torch.Tensor` — The inverted attention mask.

Invert an attention mask (e.g., switches 0. and 1.).
#### num_parameters

`(only_trainable: bool = False, exclude_embeddings: bool = False) → int` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L832)

**Parameters**

- **only_trainable** (`bool`, *optional*, defaults to `False`) — Whether or not to return only the number of trainable parameters.
- **exclude_embeddings** (`bool`, *optional*, defaults to `False`) — Whether or not to return only the number of non-embeddings parameters.

**Returns**: `int` — The number of parameters.

Get number of (optionally, trainable or non-embeddings) parameters in the module.
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of parameters.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get number of (optionally, trainable or non-embeddings) parameters in the module.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset_memory_hooks_state</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L666" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the <code>mem_rss_diff</code> attribute of each module (see <a 
href="/docs/transformers/pr_19429/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks">add_memory_hooks()</a>).</p></div></div> <h2 class="relative group"><a id="transformers.TFPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFPreTrainedModel </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L988" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all TF models.</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:</p> <ul><li>resize the input embeddings,</li> <li>prune heads in the self-attention heads.</li></ul> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>config_class</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use as configuration class for this model architecture.</li> <li><strong>base_model_prefix</strong> (<code>str</code>) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.</li> <li><strong>main_input_name</strong> (<code>str</code>) — The name of the principal input to the model (often <code>input_ids</code> for NLP models, <code>pixel_values</code> for vision models and <code>input_values</code> for speech models).</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 
7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2642" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.repo_id" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. 
Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload model&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>). 
model_card_kwargs &#x2014; Additional keyword arguments passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.create_model_card">create_model_card()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.TFPreTrainedModel.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel model = TFAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.compile"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>compile</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.compile" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.compile"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1282" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"> = &#39;rmsprop&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60"> = &#39;passthrough&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss_weights<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weighted_metrics<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_eagerly<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">steps_per_execution<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a thin wrapper that sets the model’s loss output head as the loss if the user does not specify a loss function themselves.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.create_model_card"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_model_card</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.create_model_card" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.create_model_card"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1573" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_name<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">language<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">license<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tags<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">finetuned_from<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tasks<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataset_tags<span class="opacity-60">: typing.Union[str, typing.List[str], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataset<span class="opacity-60">: typing.Union[str, typing.List[str], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataset_args<span class="opacity-60">: typing.Union[str, typing.List[str], NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The folder in which to create the model card.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.model_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.model_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.language" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.language"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>language</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the model (if applicable)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 
items-start"><a id="transformers.TFPreTrainedModel.create_model_card.license" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.license"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>license</strong> (<code>str</code>, <em>optional</em>) &#x2014; The license of the model. Will default to the license of the pretrained model used, if the original model given to the <code>Trainer</code> comes from a repo on the Hub.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.tags" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.tags"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Some tags to be included in the metadata of the model card.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.finetuned_from" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.finetuned_from"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>finetuned_from</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the <code>Trainer</code> (if it comes from the Hub).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.tasks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.tasks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tasks</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several task identifiers, to be included in the metadata of the model card.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.dataset_tags" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.dataset_tags"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataset_tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset tags, to be included in the metadata of the 
model card.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataset</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset identifiers, to be included in the metadata of the model card.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.create_model_card.dataset_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.create_model_card.dataset_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataset_args</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset arguments, to be included in the metadata of the model card.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Creates a draft of a model card using the information available to the <code>Trainer</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" 
height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2215" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <code>./pt_model/pytorch_model.bin</code>). In this case, <code>from_pt</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string valid as input to <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. 
Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a> and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <code>pretrained_model_name_or_path</code> and a configuration JSON file named <em>config.json</em> is found in the directory. from_pt &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Load the model weights from a PyTorch state_dict save file (see docstring of <code>pretrained_model_name_or_path</code> argument).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.ignore_mismatched_sizes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.ignore_mismatched_sizes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_mismatched_sizes</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&#x2018;http&#x2019;: &#x2018;foo.bar:3128&#x2019;, &#x2018;http://hostname&#x2019;: &#x2018;foo.bar:4012&#x2019;}</code>. The proxies are used on each request. <strong>output_loading_info</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.local_files_only(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.local_files_only(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (e.g., not try downloading the model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files.
If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.mirror" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.mirror"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014; Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. 
Please refer to the mirror site for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>). 
Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>). Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <div class="relative group rounded-md"><a id="transformers.TFPreTrainedModel.from_pretrained.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, TFBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/my_pt_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/my_pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 
17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_bias" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1713" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.TFPreTrainedModel.get_bias.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The weights representing the bias, None if not an LM model.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Dict of bias attached to an LM head. 
The key represents the name of the bias attribute.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.TFPreTrainedModel.get_input_embeddings.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> 
<p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The embeddings layer mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the model’s input embeddings layer.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_lm_head"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_lm_head</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_lm_head" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_lm_head"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1746" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.keras.layers.Layer</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative 
docstring-details "> <div id="transformers.TFPreTrainedModel.get_lm_head.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.keras.layers.Layer</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The LM head layer if the model has one, None if not.</p> <!-- HTML_TAG_END --></p> </div></div> <p>The LM Head layer. This method must be overwritten by all the models that have a lm head.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1653" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.TFPreTrainedModel.get_output_embeddings.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The new weights mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the model’s output embeddings</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_output_layer_with_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_output_layer_with_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_output_layer_with_bias" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_output_layer_with_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1690" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.keras.layers.Layer</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.TFPreTrainedModel.get_output_layer_with_bias.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.keras.layers.Layer</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The layer that handles the bias, None if not an LM model.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_prefix_bias_name"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_prefix_bias_name</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_prefix_bias_name" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_prefix_bias_name"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1703" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.TFPreTrainedModel.get_prefix_bias_name.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The _prefix name of the bias.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the concatenated _prefix name of the bias from the model name to the parent layer</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.load_repo_checkpoint"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>load_repo_checkpoint</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.load_repo_checkpoint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.load_repo_checkpoint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1129" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TFPreTrainedModel.load_repo_checkpoint.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> 
<p>A dictionary of extra metadata from the checkpoint, most commonly an “epoch” count.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when the checkpoint was made.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.prepare_tf_dataset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_tf_dataset</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.prepare_tf_dataset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.prepare_tf_dataset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1183" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataset<span class="opacity-60">: datasets.Dataset</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
prepare_tf_dataset
(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1183)

( dataset: datasets.Dataset, batch_size: int = 8, shuffle: bool = True, tokenizer: Optional["PreTrainedTokenizerBase"] = None, collate_fn: Optional[Callable] = None, collate_fn_args: Optional[Dict[str, Any]] = None, drop_remainder: Optional[bool] = None, prefetch: bool = True ) → Dataset

Parameters:
- dataset (Any) — A datasets.Dataset to be wrapped as a tf.data.Dataset.
- batch_size (int, defaults to 8) — The size of batches to return.
- shuffle (bool, defaults to True) — Whether to return samples from the dataset in random order. Usually True for training datasets and False for validation/test datasets.
- tokenizer (PreTrainedTokenizerBase, optional) — A PreTrainedTokenizer that will be used to pad samples to create batches. Has no effect if a specific collate_fn is passed instead.
- collate_fn (Callable, optional) — A function that collates samples from the dataset into a single batch. Defaults to DefaultDataCollator if no tokenizer is supplied or DataCollatorWithPadding if a tokenizer is passed.
- collate_fn_args (Dict[str, Any], optional) — A dict of arguments to pass to the collate_fn alongside the list of samples.
- drop_remainder (bool, optional) — Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults to the same setting as shuffle.
- prefetch (bool, defaults to True) — Whether to add prefetching to the end of the tf.data pipeline. This is almost always beneficial for performance, but can be disabled in edge cases.

Returns: Dataset — A tf.data.Dataset which is ready to pass to the Keras API.

Wraps a HuggingFace Dataset (https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset) as a tf.data.Dataset with collation and batching. This method is designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like fit() without further modification. The method will drop columns from the dataset if they don't match input names for the model. If you want to specify the column names to return rather than using the names that match this model, we recommend using Dataset.to_tf_dataset() instead.
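A minimal sketch of the intended workflow with a tiny in-memory translation dataset; the sentences, checkpoint choice, and the use of DataCollatorForSeq2Seq are illustrative assumptions rather than part of this docstring:

```python
from datasets import Dataset
from transformers import DataCollatorForSeq2Seq, MarianTokenizer, TFMarianMTModel

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# Tiny in-memory corpus standing in for a real translation dataset.
raw = Dataset.from_dict(
    {
        "en": ["The house is wonderful.", "I like to eat apples."],
        "de": ["Das Haus ist wunderbar.", "Ich esse gerne Äpfel."],
    }
)

def tokenize(batch):
    # Tokenize sources and targets; "labels" is produced from the target text.
    return tokenizer(batch["en"], text_target=batch["de"], truncation=True)

tokenized = raw.map(tokenize, batched=True, remove_columns=["en", "de"])

# A seq2seq collator pads input_ids, attention_mask and labels per batch.
collator = DataCollatorForSeq2Seq(tokenizer, return_tensors="np")

# Columns that don't match the model's input names are dropped automatically.
tf_dataset = model.prepare_tf_dataset(
    tokenized,
    batch_size=2,
    shuffle=True,
    collate_fn=collator,
)

model.compile(optimizer="adam")  # transformers TF models compute their own loss by default
model.fit(tf_dataset, epochs=1)
```

Passing an explicit seq2seq collator instead of relying on the default padding behaviour is a design choice here: variable-length labels are easier to pad correctly with a dedicated collator.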
prune_heads
(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2082)

( heads_to_prune )

Parameters:
- heads_to_prune (Dict[int, List[int]]) — Dictionary with keys being selected layer indices (int) and associated values being the list of heads to prune in said layer (list of int). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.

Prunes heads of the base model.
href="#transformers.TFPreTrainedModel.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2732" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;TFAutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;TFAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.resize_token_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>resize_token_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.resize_token_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.resize_token_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1755" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
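A short sketch of the intended use with a custom model; `MyConfig` and `MyTFModel` are made-up names, and library models never need this call:

```python
from transformers import PretrainedConfig, TFPreTrainedModel


class MyConfig(PretrainedConfig):
    model_type = "my-tf-model"  # hypothetical architecture name


class MyTFModel(TFPreTrainedModel):
    config_class = MyConfig
    base_model_prefix = "my_tf_model"


# Make the custom class discoverable through TFAutoModel (e.g. together with
# trust_remote_code=True once the modeling code is shared on the Hub).
MyTFModel.register_for_auto_class("TFAutoModel")
```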
resize_token_embeddings
(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1755)

( new_num_tokens: Optional[int] = None ) → tf.Variable or tf.keras.layers.Embedding

Parameters:
- new_num_tokens (int, optional) — The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None, just returns a pointer to the input tokens without doing anything.

Returns: tf.Variable or tf.keras.layers.Embedding — Pointer to the input tokens of the model.

Resizes input token embeddings matrix of the model if new_num_tokens != config.vocab_size.

Takes care of tying weights embeddings afterwards if the model class has a tie_weights() method.
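For illustration, a sketch that adds one new token to the tokenizer and resizes the embedding matrix to match; the token string and checkpoint are arbitrary examples:

```python
from transformers import MarianTokenizer, TFMarianMTModel

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# Add a (made-up) domain token, then grow the embedding matrix to the new vocabulary size.
tokenizer.add_tokens(["<molecule>"])
embeddings = model.resize_token_embeddings(new_num_tokens=len(tokenizer))

# config.vocab_size is updated to match; the newly added vectors are randomly initialized.
print(model.config.vocab_size)
```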
save_pretrained
(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2094)

( save_directory, saved_model = False, version = 1, push_to_hub = False, max_shard_size: Union[int, str] = '10GB', create_pr: bool = False, **kwargs )

Parameters:
- save_directory (str) — Directory to which to save. Will be created if it doesn't exist.
- saved_model (bool, optional, defaults to False) — Whether to also export the model in the TensorFlow SavedModel format.
- version (int, optional, defaults to 1) — The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving, as detailed in the official documentation: https://www.tensorflow.org/tfx/serving/serving_basic
- push_to_hub (bool, optional, defaults to False) — Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with repo_id (will default to the name of save_directory in your namespace).
- max_shard_size (int or str, optional, defaults to "10GB") — The maximum size for a checkpoint before being sharded; each checkpoint shard will then be smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like "5MB"). Note: if a single weight of the model is bigger than max_shard_size, it will be in its own checkpoint shard, which will be bigger than max_shard_size.
- create_pr (bool, optional, defaults to False) — Whether or not to create a PR with the uploaded files or directly commit.
- kwargs — Additional keyword arguments passed along to the push_to_hub() method.

Save a model and its configuration file to a directory, so that it can be re-loaded using the from_pretrained() class method.
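A small sketch of the round trip; the directory name and shard size are arbitrary choices for illustration:

```python
from transformers import TFMarianMTModel

model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# Save weights + config locally; the directory is created if it does not exist.
model.save_pretrained("./opus-mt-en-de-local", max_shard_size="2GB")

# The saved directory can be reloaded exactly like a Hub checkpoint.
reloaded = TFMarianMTModel.from_pretrained("./opus-mt-en-de-local")

# To upload instead of (or in addition to) saving locally, one could pass
# push_to_hub=True and repo_id="my-user/my-marian-model" (hypothetical repo, requires login).
```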
serving

( inputs )

Parameters:
- inputs (Dict[str, tf.Tensor]) — The input of the saved model as a dictionary of tensors.

Method used for serving the model.
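A hedged sketch of how the serving signature is typically exercised: export a versioned SavedModel with save_pretrained(..., saved_model=True) (written under save_directory/saved_model/version), then load it back and inspect the signature. The directory name is an arbitrary example:

```python
import tensorflow as tf

from transformers import TFMarianMTModel

model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# saved_model=True additionally writes a versioned TF SavedModel (version 1 by default).
model.save_pretrained("./opus-mt-en-de-export", saved_model=True)

# The exported signature is what TensorFlow Serving (or tf.saved_model.load) invokes
# with a dictionary of tensors, i.e. the `serving` method documented here.
loaded = tf.saved_model.load("./opus-mt-en-de-export/saved_model/1")
serving_fn = loaded.signatures["serving_default"]
print(serving_fn.structured_input_signature)
```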
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.serving_output"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>serving_output</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.serving_output" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.serving_output"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1092" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.serving_output.output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.serving_output.output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output</strong> (<code>TFBaseModelOutput</code>) &#x2014; The output returned by the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Prepare the output of the saved model. Each model must implement this function.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_bias" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1730" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_bias.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.set_bias.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>Dict[tf.Variable]</code>) &#x2014; All the new bias attached to an LM head.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set all the bias in the LM head.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1633" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_input_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.set_input_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s input embeddings</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1673" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_output_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.set_output_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s output embeddings</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.test_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>test_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.test_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.test_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1470" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A modification of Keras’s default <code>train_step</code> that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.train_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.train_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.train_step"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L1362" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A modification of Keras’s default <code>train_step</code> that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.TFModelUtilsMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFModelUtilsMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFModelUtilsMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFModelUtilsMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFModelUtilsMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFModelUtilsMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFModelUtilsMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L92" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A few utilities for <code>tf.keras.Model</code>, to be used as a mixin.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 
11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_parameters</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L97" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">only_trainable<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of parameters.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the number of (optionally, trainable) parameters in the model.</p></div></div> <h2 class="relative group"><a id="transformers.FlaxPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxPreTrainedModel </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax.numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">_do_init<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all models.</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a> takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.</p> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>config_class</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use as configuration class for this model architecture.</li> <li><strong>base_model_prefix</strong> (<code>str</code>) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.</li> <li><strong>main_input_name</strong> (<code>str</code>) — The name of the 
principal input to the model (often <code>input_ids</code> for NLP models, <code>pixel_values</code> for vision models and <code>input_values</code> for speech models).</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your model to. 
It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload model&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the model checkpoint to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.FlaxPreTrainedModel.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxAutoModel model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot;.</span> model.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 
27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L472" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax.numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pt index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_pt</code> should be set to <code>True</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a 
href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul> <p>Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:</p> <ul> <li>The model is a model provided by the library (loaded with the <em>model id</em> string of a pretrained model).</li> <li>The model was saved using <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a> and is reloaded by supplying the save directory.</li> <li>The model is loaded by supplying a local directory as <code>pretrained_model_name_or_path</code> and a configuration JSON file named <em>config.json</em> is found in the directory.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.from_pt" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.from_pt"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>from_pt</strong> 
(<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Load the model weights from a PyTorch checkpoint save file (see docstring of <code>pretrained_model_name_or_path</code> argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.ignore_mismatched_sizes" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.ignore_mismatched_sizes"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_mismatched_sizes</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.resume_download" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. 
The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.local_files_only(bool," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.local_files_only(bool,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_files_only(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only look at local files (i.e., do not try to download the model).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014; Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., <code>output_attentions=True</code>). 
Behaves differently depending on whether a <code>config</code> is provided or automatically loaded:</p> <ul> <li>If a configuration is provided with <code>config</code>, <code>**kwargs</code> will be directly passed to the underlying model&#x2019;s <code>__init__</code> method (we assume all relevant updates to the configuration have already been done)</li> <li>If a configuration is not provided, <code>kwargs</code> will be first passed to the configuration class initialization function (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>). Each key of <code>kwargs</code> that corresponds to a configuration attribute will be used to override said attribute with the supplied <code>kwargs</code> value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model&#x2019;s <code>__init__</code> function.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a pretrained flax model from a pre-trained model configuration.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <div class="relative group rounded-md"><a id="transformers.FlaxPreTrainedModel.from_pretrained.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 
opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.load_flax_sharded_weights"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>load_flax_sharded_weights</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.load_flax_sharded_weights" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.load_flax_sharded_weights"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L425" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">shard_files<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.load_flax_sharded_weights.shard_files" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.load_flax_sharded_weights.shard_files"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>shard_files</strong> (<code>List[str]</code> &#x2014; The list of shard files to load.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.FlaxPreTrainedModel.load_flax_sharded_weights.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A nested dictionary of the 
model parameters, in the expected format for flax models: <code>&#123;'model': &#123;'params': &#123;'...'&#125;&#125;&#125;</code>.</p> <!-- HTML_TAG_END --></p> </div></div> <p>This is the same as <code>flax.serialization.from_bytes</code> (https://flax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.</p> <p>This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L1034" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;FlaxAutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;FlaxAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L937" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60"> = &#39;10GB&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. 
Will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.save_pretrained.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.save_pretrained.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If a single weight of the model is bigger than <code>max_shard_size</code>, it will be in its own checkpoint shard which will be bigger than <code>max_shard_size</code>.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a model and its configuration file to a directory, so that it can be re-loaded using the <code>[from_pretrained()](/docs/transformers/pr_19429/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)</code> class method</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_bf16"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_bf16</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_bf16" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_bf16"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L320" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_bf16.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_bf16.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_bf16.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_bf16.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>params</code> to <code>jax.numpy.bfloat16</code>. This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <p>This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.</p> <div class="relative group rounded-md"><a id="transformers.FlaxPreTrainedModel.to_bf16.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_bf16.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span 
class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params, mask)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_fp16"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_fp16</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_fp16" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_fp16"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L386" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp16.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp16.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp16.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp16.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>params</code> to <code>jax.numpy.float16</code>. This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <p>This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.</p> <div class="relative group rounded-md"><a id="transformers.FlaxPreTrainedModel.to_fp16.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp16.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute
pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to cast these to float16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params, mask)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_fp32"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_fp32</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_fp32" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_fp32"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_utils.py#L359" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp32.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp32.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp32.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp32.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>parmas</code> to <code>jax.numpy.float32</code>. This method can be used to explicitly convert the model parameters to fp32 precision. 
This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <div class="relative group rounded-md"><a id="transformers.FlaxPreTrainedModel.to_fp32.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp32.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to illustrate the use of this method,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># we&#x27;ll first cast to fp16 and back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># now cast back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp32(model.params)<!-- HTML_TAG_END --></pre></div></div></div></div> <h2 class="relative group"><a id="transformers.utils.PushToHubMixin" class="header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pushing to the Hub </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.PushToHubMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.utils.</span><span class="font-semibold">PushToHubMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.utils.PushToHubMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.PushToHubMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L627" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A Mixin containing the functionality to push a model or tokenizer to the hub.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your {object} to. 
It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload {object}&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.PushToHubMixin.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the {object_files} to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_id</code>.</p> <div class="relative group rounded-md"><a id="transformers.utils.PushToHubMixin.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.PushToHubMixin.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> {object_class} {<span class="hljs-built_in">object</span>} = {object_class}.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot;.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the {object} to an organization with the name &quot;my-finetuned-bert&quot;.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div></div> <h2 class="relative group"><a id="transformers.modeling_utils.load_sharded_checkpoint" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.load_sharded_checkpoint"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Sharded checkpoints </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.load_sharded_checkpoint"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_utils.load_sharded_checkpoint</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.load_sharded_checkpoint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.load_sharded_checkpoint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L323" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">folder<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">strict<span class="opacity-60"> = True</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>NamedTuple</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.load_sharded_checkpoint.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.load_sharded_checkpoint.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>torch.nn.Module</code>) &#x2014; The model in which to load the checkpoint.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.load_sharded_checkpoint.folder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.load_sharded_checkpoint.folder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>folder</strong> (<code>str</code> or 
<code>os.PathLike</code>) &#x2014; A path to a folder containing the sharded checkpoint.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.load_sharded_checkpoint.strict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.load_sharded_checkpoint.strict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>strict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.load_sharded_checkpoint.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>NamedTuple</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A named tuple with <code>missing_keys</code> and <code>unexpected_keys</code> fields</p> <ul> <li><code>missing_keys</code> is a list of str containing the missing keys</li> <li><code>unexpected_keys</code> is a list of str containing the unexpected keys</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>This is the same as <a href="https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict" rel="nofollow"><code>torch.nn.Module.load_state_dict</code></a> but for a sharded checkpoint.</p> <p>This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model.</p></div> <script type="module" data-hydrate="h5945p"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="h5945p"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/model.mdx-hf-doc-builder.js") ], params: {} } }); </script>
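<div class="relative group rounded-md"> <p>The following is a rough, hand-added usage sketch rather than an official example from the docstring above; the model class, the local directory name <code>sharded-bert</code> and the shard size are illustrative placeholders. It saves a model as a sharded checkpoint with <code>save_pretrained</code> and then loads the shards back with <code>load_sharded_checkpoint</code>:</p> <pre>
from transformers import BertModel
from transformers.modeling_utils import load_sharded_checkpoint

# Save a model as a sharded checkpoint (the small shard size is only for illustration).
model = BertModel.from_pretrained("bert-base-cased")
model.save_pretrained("sharded-bert", max_shard_size="200MB")

# Rebuild an architecture-compatible model, then load the shards into it one by one.
reloaded = BertModel.from_pretrained("bert-base-cased")
result = load_sharded_checkpoint(reloaded, "sharded-bert", strict=True)
print(result.missing_keys, result.unexpected_keys)
</pre></div>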
48
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/configuration.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;configuration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PretrainedConfig&quot;,&quot;title&quot;:&quot;PretrainedConfig&quot;}],&quot;title&quot;:&quot;Configuration&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/configuration.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuration </span></h1> <p>The base class <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace’s AWS S3 repository).</p> <p>Each derived config class implements model specific attributes. Common attributes present in all config classes are: <code>hidden_size</code>, <code>num_attention_heads</code>, and <code>num_hidden_layers</code>. 
Text models further implement: <code>vocab_size</code>.</p> <h2 class="relative group"><a id="transformers.PretrainedConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PretrainedConfig </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PretrainedConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Store the string that was passed to <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> or <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">TFPreTrainedModel.from_pretrained()</a> as <code>pretrained_model_name_or_path</code> if the configuration was created with such a method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return all hidden-states.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should returns all attentions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.is_encoder_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.is_encoder_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as an encoder/decoder or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.is_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.is_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as decoder or not (in which case it&#x2019;s used as an encoder).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.cross_attention_hidden_size**" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.cross_attention_hidden_size**"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attention_hidden_size**</strong> (<code>bool</code>, <em>optional</em>) &#x2014; The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder setting and the cross-attention hidden dimension differs from <code>self.config.hidden_size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.add_cross_attention" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.add_cross_attention"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether cross-attention layers should be added to the model. 
Note, this option is only relevant for models that can be used as decoder models within the <a href="/docs/transformers/pr_19429/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> class, which consists of all models in <code>AUTO_MODELS_FOR_CAUSAL_LM</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.tie_encoder_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.tie_encoder_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tie_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.prune_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.prune_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prune_heads</strong> (<code>Dict[int, List[int]]</code>, <em>optional</em>, defaults to <code>{}</code>) &#x2014; Pruned heads of the model. 
The keys are the selected layer indices and the associated values, the list of heads to prune in said layer.</p> <p>For instance <code>{1: [0, 2], 2: [2, 3]}</code> will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.chunk_size_feed_forward" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.chunk_size_feed_forward"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_size_feed_forward</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; The chunk size of all feed forward layers in the residual attention blocks. A chunk size of <code>0</code> means that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes <code>n</code> &lt; sequence_length embeddings at a time. 
For more information on feed forward chunking, see <a href="../glossary.html#feed-forward-chunking">How does Feed Forward Chunking work?</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> <p class="flex items-center font-semibold">Parameters for sequence generation <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; Maximum length that will be used by default in the <code>generate</code> method of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; Minimum length that will be used by default in the <code>generate</code> method of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.do_sample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
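To make the common attributes above concrete, the following minimal sketch uses `BertConfig`/`BertModel` purely as an example pair (any model config behaves the same way); the save directory is a placeholder.

```python
from transformers import BertConfig, BertModel

# BertConfig is one concrete subclass of PretrainedConfig.
config = BertConfig(
    output_hidden_states=True,  # common attribute: return all hidden states
    output_attentions=True,     # common attribute: return all attention weights
)
model = BertModel(config)       # randomly initialised model built from this config

# Prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2
# (the same mapping used in the parameter description above).
model.prune_heads({1: [0, 2], 2: [2, 3]})

# Round-trip the configuration through disk; the directory name is a placeholder.
config.save_pretrained("./my-config")  # writes config.json
reloaded = BertConfig.from_pretrained("./my-config")
assert reloaded.output_hidden_states is True
```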
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag that will be used by default in the <code>generate</code> method of the model. Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag that will be used by default in the <code>generate</code> method of the model. 
Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search that will be used by default in the <code>generate</code> method of the model. 1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams that will be used by default in the <code>generate</code> method of the model. 
1 means no group beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.diversity_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.diversity_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Value to control diversity for group beam search. that will be used by default in the <code>generate</code> method of the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The value used to module the next token probabilities that will be used by default in the <code>generate</code> method of the model. 
Must be strictly positive.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the <code>generate</code> method of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Value that will be used by default in the <code>generate</code> method of the model for <code>top_p</code>. 
If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Parameter for repetition penalty that will be used by default in the <code>generate</code> method of the model. 1.0 means no penalty.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. 
negative), <code>length_penalty</code> &gt; 0.0 promotes longer sequences, while <code>length_penalty</code> &lt; 0.0 encourages shorter sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Value that will be used by default in the &#x2014; <code>generate</code> method of the model for <code>no_repeat_ngram_size</code>. If set to int &gt; 0, all ngrams of that size can only occur once.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.encoder_no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.encoder_no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Value that will be used by &#x2014; default in the <code>generate</code> method of the model for <code>encoder_no_repeat_ngram_size</code>. 
If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.bad_words_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.bad_words_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated that will be used by default in the <code>generate</code> method of the model. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.num_return_sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.num_return_sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_return_sequences</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of independently computed returned sequences for each element in the batch that will be used by default in the <code>generate</code> method of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PretrainedConfig.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should return the logits when used for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a <code>torch.LongTensor</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.forced_bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.forced_bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.remove_invalid_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.remove_invalid_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. 
Note that using <code>remove_invalid_values</code> can slow down generation.<!-- HTML_TAG_END --> </span></span> </li> </ul><p class="flex items-center font-semibold">Parameters for fine-tuning tasks <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.architectures" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.architectures"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>architectures</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Model architectures that can be used with the model pretrained weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.finetuning_task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.finetuning_task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>finetuning_task</strong> (<code>str</code>, <em>optional</em>) &#x2014; Name of the task used to fine-tune the model. 
This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.id2label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.id2label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>id2label</strong> (<code>Dict[int, str]</code>, <em>optional</em>) &#x2014; A map from index (for instance prediction index, or target index) to label.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.label2id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.label2id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label2id</strong> (<code>Dict[str, int]</code>, <em>optional</em>) &#x2014; A map from label to index for the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.num_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.num_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_labels</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of labels to use in the last layer added to the model, typically for a classification task.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.task_specific_params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.task_specific_params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task_specific_params</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; Additional keyword arguments to store for the current task.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.problem_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.problem_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>problem_type</strong> (<code>str</code>, <em>optional</em>) &#x2014; Problem type for <code>XxxForSequenceClassification</code> models. 
Can be one of <code>&quot;regression&quot;</code>, <code>&quot;single_label_classification&quot;</code> or <code>&quot;multi_label_classification&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li> </ul><p class="flex items-center font-semibold">Parameters linked to the tokenizer <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.tokenizer_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.tokenizer_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer_class</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the model by default).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; A specific prompt that should be added at the beginning of each text before calling the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.bos_token_id"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-stream</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-stream</em> 
token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.sep_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.sep_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>separation</em> token.<!-- HTML_TAG_END --> </span></span> </li> </ul><p class="flex items-center font-semibold">PyTorch specific parameters <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.torchscript" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.torchscript"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torchscript</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be used with Torchscript.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.tie_word_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.tie_word_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the model&#x2019;s input and output word embeddings should be tied. 
Note that this is only relevant if the model has an output word embedding layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.torch_dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.torch_dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torch_dtype</strong> (<code>str</code>, <em>optional</em>) &#x2014; The <code>dtype</code> of the weights. This attribute can be used to initialize the model to a non-default <code>dtype</code> (which is normally <code>float32</code>) and thus allow for optimal storage allocation. For example, if the saved model is <code>float16</code>, ideally we want to load it back using the minimal amount of memory needed to load <code>float16</code> weights. Since the config object is stored in plain text, this attribute contains just the floating type string without the <code>torch.</code> prefix. For example, for <code>torch.float16</code>, <code>torch_dtype</code> is the <code>&quot;float16&quot;</code> string.</p> <p>This attribute is currently not being used during model loading time, but this may change in future versions. 
But we can already start preparing for the future by saving the dtype with save_pretrained.<!-- HTML_TAG_END --> </span></span> </li> </ul><p class="flex items-center font-semibold">TensorFlow specific parameters <span class="flex-auto border-t-2 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.use_bfloat16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.use_bfloat16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_bfloat16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.tf_legacy_loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.tf_legacy_loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tf_legacy_loss</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may not be XLA-compatible. This option is here for backward compatibility and will be removed in Transformers v5.<!-- HTML_TAG_END --> </span></span> </li> </ul> </div></div> <p>Base class for all configuration classes. 
Handles a few parameters common to all models’ configurations as well as methods for loading/downloading/saving configurations.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does <strong>not</strong> load the model weights. It only affects the model’s configuration.</p></div> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>model_type</strong> (<code>str</code>) — An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoConfig">AutoConfig</a>.</li> <li><strong>is_composition</strong> (<code>bool</code>) — Whether the config class is composed of multiple sub-configs. In this case the config has to be initialized from two or more configs of type <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> like: <a href="/docs/transformers/pr_19429/en/model_doc/encoder-decoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a> or <a href="/docs/transformers/pr_19429/en/model_doc/rag#transformers.RagConfig">~RagConfig</a>.</li> <li><strong>keys_to_ignore_at_inference</strong> (<code>List[str]</code>) — A list of keys to ignore by default when looking at dictionary outputs of the model during inference.</li> <li><strong>attribute_map</strong> (<code>Dict[str, str]</code>) — A dict that maps model specific attribute names to the standardized naming of attributes.</li></ul> <p>Common attributes (present in all subclasses):</p> <ul><li><strong>vocab_size</strong> (<code>int</code>) — The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don’t have a text modality like ViT).</li> <li><strong>hidden_size</strong> (<code>int</code>) — The hidden size of the model.</li> <li><strong>num_attention_heads</strong> (<code>int</code>) — The number of attention heads used in the multi-head attention layers of the model.</li> <li><strong>num_hidden_layers</strong> (<code>int</code>) — The number of blocks in the model.</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your config to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. 
Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload config&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the configuration file to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.PretrainedConfig.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot;.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the config to an organization with the name &quot;my-finetuned-bert&quot;.</span> config.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.dict_torch_dtype_to_str"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>dict_torch_dtype_to_str</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.dict_torch_dtype_to_str" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.dict_torch_dtype_to_str"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L873" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Checks whether the passed dictionary and its nested dicts have a <em>torch_dtype</em> key and if it’s not None, converts torch.dtype to a string of just the type. For example, <code>torch.float32</code> get converted into <em>“float32”</em> string, which can then be stored in the json format.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.from_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L657" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_dict<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_dict.config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_dict.config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.get_config_dict">get_config_dict()</a> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_dict.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_dict.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, Any]</code>) &#x2014; Additional parameters from which to initialize the configuration object.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PretrainedConfig.from_dict.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from those parameters.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Instantiates a <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> from a Python dictionary of parameters.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 
11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_json_file</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.from_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L711" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_json_file.json_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_json_file.json_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file containing the parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PretrainedConfig.from_json_file.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from that JSON file.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Instantiates a <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> from the path to a JSON file of parameters.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L454" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained model configuration hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a configuration file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved configuration JSON <em>file</em>, e.g., <code>./my_model_directory/configuration.json</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the configuration files and override the cached versions if they exist.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.return_unused_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.return_unused_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final configuration object.</p> <p>If <code>True</code>, then this functions returns a <code>Tuple(config, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>config</code> and is otherwise ignored.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are <em>not</em> configuration attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PretrainedConfig.from_pretrained.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from this pretrained model.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Instantiate a <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> (or a derived class) from a pretrained model configuration.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <div class="relative group rounded-md"><a id="transformers.PretrainedConfig.from_pretrained.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PretrainedConfig* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: BertConfig</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span> ) <span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;./test/saved_model/&quot;</span> ) <span class="hljs-comment"># E.g. 
config (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> config = BertConfig.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_configuration.json&quot;</span>) config = BertConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> config, unused_kwargs = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span> ) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-keyword">assert</span> unused_kwargs == {<span class="hljs-string">&quot;foo&quot;</span>: <span class="hljs-literal">False</span>}<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.get_config_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_config_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.get_config_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.get_config_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L540" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[Dict, Dict]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PretrainedConfig.get_config_dict.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[Dict, Dict]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The dictionary(ies) that will be used to instantiate the configuration object.</p> <!-- HTML_TAG_END --></p> </div></div> <p>From a <code>pretrained_model_name_or_path</code>, resolve to a dictionary of parameters, to be used for 
instantiating a <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> using <code>from_dict</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L885" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoConfig&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters 
<span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoConfig&quot;</code>) &#x2014; The auto class to register this new configuration with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. This should only be used for custom configurations as the ones in the library are already mapped with <code>AutoConfig</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 
8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L412" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the configuration JSON file will be saved (will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). 
kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a configuration object to the directory <code>save_directory</code>, so that it can be re-loaded using the <a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a> class method.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L771" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span 
class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, Any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PretrainedConfig.to_dict.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, Any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Serializes this instance to a Python dictionary.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_diff_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_diff_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_diff_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_diff_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L739" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, Any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PretrainedConfig.to_diff_dict.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, Any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- HTML_TAG_END --></p> </div></div> <p>Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_file</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L811" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_diff<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_file.json_file_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_file.json_file_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>json_file_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file in which this configuration instance&#x2019;s parameters will be saved.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_file.use_diff" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_file.use_diff"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 
1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON file.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save this instance to a JSON file.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L793" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_diff<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_string.use_diff" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_string.use_diff"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON string.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PretrainedConfig.to_json_string.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>String containing all the attributes that make up this configuration instance in JSON format.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Serializes this instance to a JSON string.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.update"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 
6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.update" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.update"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L825" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_dict<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.update.config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.update.config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary of attributes that should be updated for this class.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Updates attributes of this class with attributes from <code>config_dict</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.update_from_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update_from_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.update_from_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.update_from_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/configuration_utils.py#L835" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">update_str<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.update_from_string.update_str" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.update_from_string.update_str"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>update_str</strong> (<code>str</code>) &#x2014; String with attributes that should be updated for this class.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Updates attributes of this class with attributes from <code>update_str</code>.</p> <p>The expected format is ints, floats and strings as is, and for booleans use <code>true</code> or <code>false</code>. For example: “n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index”</p> <p>The keys to change have to already exist in the config object.</p></div></div> <script type="module" data-hydrate="1j42qde"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1j42qde"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/configuration.mdx-hf-doc-builder.js") ], params: {} } }); </script>
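<p>As a minimal illustrative sketch (not part of the rendered page above), the serialization and update helpers documented in this reference can be combined as follows; <code>GPT2Config</code> is used only because its attributes match the <code>update_from_string</code> example string, and any <code>PretrainedConfig</code> subclass exposes the same methods:</p> <pre>
from transformers import GPT2Config  # any PretrainedConfig subclass exposes the same helpers

config = GPT2Config()

full_dict = config.to_dict()        # every attribute of this configuration instance
diff_dict = config.to_diff_dict()   # only the attributes that differ from the defaults

json_str = config.to_json_string(use_diff=True)   # JSON string, diff-only by default
config.to_json_file("config.json", use_diff=True)  # write the same JSON to disk

# update() takes a dict; update_from_string() parses a comma-separated key=value string
# whose keys must already exist on the config object
config.update({"resid_pdrop": 0.2})
config.update_from_string("n_embd=10,scale_attn_weights=false,summary_type=cls_index")
</pre>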
49
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/processors.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;processors&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.ProcessorMixin&quot;,&quot;title&quot;:&quot;Multi-modal processors&quot;},{&quot;local&quot;:&quot;transformers.DataProcessor&quot;,&quot;title&quot;:&quot;Deprecated processors&quot;},{&quot;local&quot;:&quot;transformers.glue_convert_examples_to_features&quot;,&quot;title&quot;:&quot;GLUE&quot;},{&quot;local&quot;:&quot;xnli&quot;,&quot;title&quot;:&quot;XNLI&quot;},{&quot;local&quot;:&quot;squad&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.data.processors.squad.SquadProcessor&quot;,&quot;title&quot;:&quot;Processors&quot;},{&quot;local&quot;:&quot;example-usage&quot;,&quot;title&quot;:&quot;Example usage&quot;}],&quot;title&quot;:&quot;SQuAD&quot;}],&quot;title&quot;:&quot;Processors&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/processors.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="processors" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#processors"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Processors </span></h1> <p>Processors can mean two different things in the Transformers library:</p> <ul><li>the objects that pre-process inputs for multi-modal models such as <a href="../model_doc/wav2vec2">Wav2Vec2</a> (speech and text) or <a href="../model_doc/clip">CLIP</a> (text and vision)</li> <li>deprecated objects that were used in older versions of the library to preprocess data 
for GLUE or SQUAD.</li></ul> <h2 class="relative group"><a id="transformers.ProcessorMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multi-modal processors </span></h2> <p>Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).</p> <p>Those processors inherit from the following base class that implements the saving and loading functionality:</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProcessorMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L43" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a mixin used to provide saving/loading functionality for all processor classes.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L152" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>~tokenization_utils_base.PreTrainedTokenizer.from_pretrained</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a processor associated with a pretrained model.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This class method is simply calling the feature extractor <a href="/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and the tokenizer <code>~tokenization_utils_base.PreTrainedTokenizer.from_pretrained</code> methods. Please refer to the docstrings of the methods above for more information.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your processor to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload processor&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the processor files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.ProcessorMixin.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.ProcessorMixin.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot;.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the processor to an organization with the name &quot;my-finetuned-bert&quot;.</span> processor.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 
#### register_for_auto_class

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L185)

`( auto_class = 'AutoProcessor' )`

Parameters:

- **auto_class** (`str` or `type`, *optional*, defaults to `"AutoProcessor"`): The auto class to register this new feature extractor with.

Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoProcessor`.

This API is experimental and may have some slight breaking changes in the next releases.
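A minimal sketch of how this is typically used, assuming a hypothetical `CustomProcessor` that is not part of the library (the attribute names below are illustrative only):

```python
from transformers import ProcessorMixin


class CustomProcessor(ProcessorMixin):
    # hypothetical custom processor pairing a feature extractor with a tokenizer
    attributes = ["feature_extractor", "tokenizer"]
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"


# register the class so that AutoProcessor can resolve it for custom checkpoints
CustomProcessor.register_for_auto_class("AutoProcessor")
```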
#### save_pretrained

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/processing_utils.py#L94)

`( save_directory, push_to_hub: bool = False, **kwargs )`

Parameters:

- **save_directory** (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (the directory will be created if it does not exist).
- **push_to_hub** (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace).
- **kwargs**: Additional keyword arguments passed along to the [push_to_hub()](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub) method.

Saves the attributes of this processor (feature extractor, tokenizer…) in the specified directory so that it can be reloaded using the [from_pretrained()](/docs/transformers/pr_19429/en/model_doc/trocr#transformers.TrOCRProcessor.from_pretrained) method.

This class method is simply calling [save_pretrained()](/docs/transformers/pr_19429/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained) and `~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`. Please refer to the docstrings of the methods above for more information.
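A minimal save/reload round trip, shown here with `TrOCRProcessor` (the checkpoint and local directory names are just examples):

```python
from transformers import TrOCRProcessor

# load a pretrained processor, then save its feature extractor and tokenizer files locally
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
processor.save_pretrained("./my-trocr-processor")

# the saved directory can be reloaded with from_pretrained
reloaded = TrOCRProcessor.from_pretrained("./my-trocr-processor")
```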
## Deprecated processors

All processors follow the same architecture which is that of the [DataProcessor](/docs/transformers/pr_19429/en/main_classes/processors#transformers.DataProcessor). The processor returns a list of [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample). These [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample) can be converted to [InputFeatures](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputFeatures) in order to be fed to the model.

### class transformers.DataProcessor

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L80)

`( )`

Base class for data converters for sequence classification data sets.

#### get_dev_examples

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L97)

`( data_dir )`

Gets a collection of [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample) for the dev set.

#### get_example_from_tensor_dict

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L83)

`( tensor_dict )`

Gets an example from a dict with tensorflow tensors.

#### get_labels

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L105)

`( )`

Gets the list of labels for this data set.

#### get_test_examples

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L101)

`( data_dir )`

Gets a collection of [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample) for the test set.

#### get_train_examples

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L93)

`( data_dir )`

Gets a collection of [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample) for the train set.

#### tfds_map

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L109)

`( example )`

Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts examples to the correct format.
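As a sketch of how a concrete subclass typically looks (the TSV layout, file names, and label set below are assumptions for illustration, not part of the library):

```python
import csv
import os

from transformers import DataProcessor, InputExample


class MySentimentProcessor(DataProcessor):
    """Hypothetical processor for a two-column TSV file: label<TAB>text."""

    def get_train_examples(self, data_dir):
        return self._create_examples(os.path.join(data_dir, "train.tsv"), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(os.path.join(data_dir, "dev.tsv"), "dev")

    def get_labels(self):
        return ["negative", "positive"]

    def _create_examples(self, path, set_type):
        examples = []
        with open(path, encoding="utf-8") as f:
            for i, (label, text) in enumerate(csv.reader(f, delimiter="\t")):
                examples.append(InputExample(guid=f"{set_type}-{i}", text_a=text, label=label))
        return examples
```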
### class transformers.InputExample

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L30)

`( guid: str, text_a: str, text_b: typing.Optional[str] = None, label: typing.Optional[str] = None )`

A single training/test example for simple sequence classification.

#### to_json_string

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L49)

`( )`

Serializes this instance to a JSON string.
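For example, building a single example (the texts and label below are illustrative) and serializing it:

```python
from transformers import InputExample

example = InputExample(
    guid="train-0",
    text_a="HuggingFace is based in NYC.",
    text_b="Where is HuggingFace based?",  # optional second sequence
    label="entailment",
)
print(example.to_json_string())
```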
### class transformers.InputFeatures

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L55)

`( input_ids: typing.List[int], attention_mask: typing.Optional[typing.List[int]] = None, token_type_ids: typing.Optional[typing.List[int]] = None, label: typing.Union[int, float, NoneType] = None )`

A single set of features of data. Property names are the same names as the corresponding inputs to a model.

#### to_json_string

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/utils.py#L75)

`( )`

Serializes this instance to a JSON string.
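A sketch of how such features are usually produced from an example pair with a tokenizer (the checkpoint name and label value are only examples):

```python
from transformers import AutoTokenizer, InputFeatures

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer(
    "HuggingFace is based in NYC.",
    "Where is HuggingFace based?",
    padding="max_length",
    max_length=32,
    truncation=True,
)

features = InputFeatures(
    input_ids=encoding["input_ids"],
    attention_mask=encoding["attention_mask"],
    token_type_ids=encoding.get("token_type_ids"),
    label=1,
)
```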
## GLUE

[General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. It was released together with the paper [GLUE: A multi-task benchmark and analysis platform for natural language understanding](https://openreview.net/pdf?id=rJ4km2R5t7).

This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.

Those processors are:

- `~data.processors.utils.MrpcProcessor`
- `~data.processors.utils.MnliProcessor`
- `~data.processors.utils.MnliMismatchedProcessor`
- `~data.processors.utils.ColaProcessor`
- `~data.processors.utils.Sst2Processor`
- `~data.processors.utils.StsbProcessor`
- `~data.processors.utils.QqpProcessor`
- `~data.processors.utils.QnliProcessor`
- `~data.processors.utils.RteProcessor`
- `~data.processors.utils.WnliProcessor`

Additionally, the following method can be used to load values from a data file and convert them to a list of [InputExample](/docs/transformers/pr_19429/en/main_classes/processors#transformers.InputExample).

#### transformers.glue_convert_examples_to_features

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/glue.py#L41)

`( examples: typing.Union[typing.List[transformers.data.processors.utils.InputExample], ForwardRef('tf.data.Dataset')], tokenizer: PreTrainedTokenizer, max_length: typing.Optional[int] = None, task = None, label_list = None, output_mode = None )`

Loads a data file into a list of `InputFeatures`.
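A usage sketch based on the TensorFlow Datasets MRPC split (it assumes `tensorflow_datasets` is installed; the tokenizer checkpoint is just an example):

```python
import tensorflow_datasets as tfds

from transformers import BertTokenizer, glue_convert_examples_to_features

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

# load the raw MRPC examples and convert them into model-ready features
data = tfds.load("glue/mrpc")
train_dataset = glue_convert_examples_to_features(
    data["train"], tokenizer, max_length=128, task="mrpc"
)
```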
## XNLI

[The Cross-Lingual NLI Corpus (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) is a benchmark that evaluates the quality of cross-lingual text representations. XNLI is a crowd-sourced dataset based on [*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/): pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource languages such as English and low-resource languages such as Swahili).

It was released together with the paper [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053).

This library hosts the processor to load the XNLI data:

- `~data.processors.utils.XnliProcessor`

Please note that since the gold labels are available on the test set, evaluation is performed on the test set.

An example using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py) script.

## SQuAD

[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250).
## SQuAD

[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/) is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250). The second version (v2.0) was released alongside the paper [Know What You Don't Know: Unanswerable Questions for SQuAD](https://arxiv.org/abs/1806.03822).

This library hosts a processor for each of the two versions:

### Processors

Those processors are:

- `~data.processors.utils.SquadV1Processor`
- `~data.processors.utils.SquadV2Processor`

They both inherit from the abstract class `~data.processors.utils.SquadProcessor`.

### class transformers.data.processors.squad.SquadProcessor

`( )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L542)

Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used by version 1.1 and version 2.0 of SQuAD, respectively.
#### get_dev_examples

`( data_dir, filename = None )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L630)

Returns the evaluation examples from the data directory.

#### get_examples_from_dataset

`( dataset, evaluate = False )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L575)

Creates a list of `SquadExample` using a TFDS dataset.

Examples:

```python
>>> import tensorflow_datasets as tfds

>>> dataset = tfds.load("squad")

>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
```

#### get_train_examples

`( data_dir, filename = None )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L608)

Returns the training examples from the data directory.
Additionally, the following method can be used to convert SQuAD examples into `~data.processors.utils.SquadFeatures` that can be used as model inputs.

#### transformers.squad_convert_examples_to_features

`( examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, padding_strategy = 'max_length', return_dataset = False, threads = 1, tqdm_enabled = True )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/data/processors/squad.py#L317)

Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.

Example:

```python
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)

features = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=args.max_seq_length,
    doc_stride=args.doc_stride,
    max_query_length=args.max_query_length,
    is_training=not evaluate,
)
```

These processors as well as the aforementioned method can be used with files containing the data as well as with the *tensorflow_datasets* package.
Examples are given below.

### Example usage

Here is an example using the processors as well as the conversion method using data files:

```python
# Loading a V2 processor
processor = SquadV2Processor()
examples = processor.get_dev_examples(squad_v2_data_dir)

# Loading a V1 processor
processor = SquadV1Processor()
examples = processor.get_dev_examples(squad_v1_data_dir)

features = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length,
    doc_stride=args.doc_stride,
    max_query_length=max_query_length,
    is_training=not evaluate,
)
```

Using *tensorflow_datasets* is as easy as using a data file:

```python
# tensorflow_datasets only handles SQuAD V1.
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)

features = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length,
    doc_stride=args.doc_stride,
    max_query_length=max_query_length,
    is_training=not evaluate,
)
```

Another example using these processors is given in the [run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) script.
50
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/trainer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;trainer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.Trainer&quot;,&quot;title&quot;:&quot;Trainer&quot;},{&quot;local&quot;:&quot;transformers.Seq2SeqTrainer&quot;,&quot;title&quot;:&quot;Seq2SeqTrainer&quot;},{&quot;local&quot;:&quot;transformers.TrainingArguments&quot;,&quot;title&quot;:&quot;TrainingArguments&quot;},{&quot;local&quot;:&quot;transformers.Seq2SeqTrainingArguments&quot;,&quot;title&quot;:&quot;Seq2SeqTrainingArguments&quot;},{&quot;local&quot;:&quot;checkpoints&quot;,&quot;title&quot;:&quot;Checkpoints&quot;},{&quot;local&quot;:&quot;logging&quot;,&quot;title&quot;:&quot;Logging&quot;},{&quot;local&quot;:&quot;randomness&quot;,&quot;title&quot;:&quot;Randomness&quot;},{&quot;local&quot;:&quot;specific-gpus-selection&quot;,&quot;title&quot;:&quot;Specific GPUs Selection&quot;},{&quot;local&quot;:&quot;trainer-integrations&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;cuda-extension-installation-notes&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;possible-problem-1&quot;,&quot;title&quot;:&quot;Possible problem #1&quot;},{&quot;local&quot;:&quot;possible-problem-2&quot;,&quot;title&quot;:&quot;Possible problem #2&quot;},{&quot;local&quot;:&quot;possible-problem-3&quot;,&quot;title&quot;:&quot;Possible problem #3&quot;}],&quot;title&quot;:&quot;CUDA Extension Installation Notes&quot;},{&quot;local&quot;:&quot;fairscale&quot;,&quot;title&quot;:&quot;FairScale&quot;},{&quot;local&quot;:&quot;pytorch-fully-sharded-data-parallel&quot;,&quot;title&quot;:&quot;PyTorch Fully Sharded Data parallel&quot;},{&quot;local&quot;:&quot;using-trainer-for-accelerated-pytorch-training-on-mac&quot;,&quot;title&quot;:&quot;Using Trainer for accelerated PyTorch Training on Mac &quot;}],&quot;title&quot;:&quot;Trainer Integrations&quot;}],&quot;title&quot;:&quot;Trainer&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/trainer.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer </span></h1> <p>The <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> class provides an API for feature-complete training in PyTorch for most standard use cases. It’s used in most of the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a>.</p> <p>Before instantiating your <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, create a <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to access all the points of customization during training.</p> <p>The API supports distributed training on multiple GPUs/TPUs, mixed precision through <a href="https://github.com/NVIDIA/apex" rel="nofollow">NVIDIA Apex</a> and Native AMP for PyTorch.</p> <p>The <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> contains the basic training loop which supports the above features. To inject custom behavior you can subclass them and override the following methods:</p> <ul><li><strong>get_train_dataloader</strong> — Creates the training DataLoader.</li> <li><strong>get_eval_dataloader</strong> — Creates the evaluation DataLoader.</li> <li><strong>get_test_dataloader</strong> — Creates the test DataLoader.</li> <li><strong>log</strong> — Logs information on the various objects watching training.</li> <li><strong>create_optimizer_and_scheduler</strong> — Sets up the optimizer and learning rate scheduler if they were not passed at init. Note, that you can also subclass or override the <code>create_optimizer</code> and <code>create_scheduler</code> methods separately.</li> <li><strong>create_optimizer</strong> — Sets up the optimizer if it wasn’t passed at init.</li> <li><strong>create_scheduler</strong> — Sets up the learning rate scheduler if it wasn’t passed at init.</li> <li><strong>compute_loss</strong> - Computes the loss on a batch of training inputs.</li> <li><strong>training_step</strong> — Performs a training step.</li> <li><strong>prediction_step</strong> — Performs an evaluation/test step.</li> <li><strong>evaluate</strong> — Runs an evaluation loop and returns metrics.</li> <li><strong>predict</strong> — Returns predictions (with metrics if labels are available) on a test set.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>The <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> class is optimized for 🤗 Transformers models and can have surprising behaviors when you use it on other models. 
The [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) class is optimized for 🤗 Transformers models and can have surprising behaviors when you use it on other models. When using it on your own model, make sure:

- your model always returns tuples or subclasses of [ModelOutput](/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput),
- your model can compute the loss if a `labels` argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples),
- your model can accept multiple label arguments (use `label_names` in your [TrainingArguments](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments) to indicate their names to the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer)), but none of them should be named `"label"`.

Here is an example of how to customize [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) to use a weighted loss (useful when you have an unbalanced training set):

```python
import torch
from torch import nn
from transformers import Trainer


class CustomTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        labels = inputs.get("labels")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.get("logits")
        # compute custom loss (suppose one has 3 labels with different weights)
        loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0]))
        loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
        return (loss, outputs) if return_outputs else loss
```

Another way to customize the training loop behavior for the PyTorch [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) is to use [callbacks](callback) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms…) and make decisions (like early stopping).
## Trainer

### class transformers.Trainer

`( model: Union[PreTrainedModel, torch.nn.Module] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[torch.utils.data.Dataset] = None, eval_dataset: Optional[torch.utils.data.Dataset] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, model_init: Callable[[], PreTrainedModel] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L209)

Parameters:

- **model** ([PreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel) or `torch.nn.Module`, *optional*) — The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.

  [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) is optimized to work with the [PreTrainedModel](/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel) provided by the library. You can still use your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers models.

- **args** ([TrainingArguments](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments), *optional*) — The arguments to tweak for training. Will default to a basic instance of [TrainingArguments](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments) with the `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.

- **data_collator** (`DataCollator`, *optional*) — The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will default to [default_data_collator()](/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.default_data_collator) if no `tokenizer` is provided, an instance of [DataCollatorWithPadding](/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding) otherwise.

- **train_dataset** (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*) — The dataset to use for training. If it is a [Dataset](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset), columns not accepted by the `model.forward()` method are automatically removed.

  Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally sets the seed of the RNGs used.

- **eval_dataset** (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]], *optional*) — The dataset to use for evaluation. If it is a [Dataset](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset), columns not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each dataset, prepending the dictionary key to the metric name.

- **tokenizer** ([PreTrainedTokenizerBase](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase), *optional*) — The tokenizer used to preprocess the data. If provided, it will be used to automatically pad the inputs to the maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an interrupted training or reuse the fine-tuned model.

- **model_init** (`Callable[[], PreTrainedModel]`, *optional*) — A function that instantiates the model to be used. If provided, each call to [train()](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train) will start from a new instance of the model as given by this function.

  The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyperparameters (such as layer count, sizes of inner layers, dropout probabilities etc.).

- **compute_metrics** (`Callable[[EvalPrediction], Dict]`, *optional*) — The function that will be used to compute metrics at evaluation.
Must take a <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.EvalPrediction">EvalPrediction</a> and return a dictionary string to metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.callbacks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.callbacks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callbacks</strong> (List of <a href="/docs/transformers/pr_19429/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>, <em>optional</em>) &#x2014; A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in <a href="callback">here</a>.</p> <p>If you want to remove one of the default callbacks used, use the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.remove_callback">Trainer.remove_callback()</a> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.optimizers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.optimizers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizers</strong> (<code>Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]</code>, <em>optional</em>) &#x2014; A tuple containing the optimizer and the scheduler to use. 
Will default to an instance of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> on your model and a scheduler given by <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.get_linear_schedule_with_warmup">get_linear_schedule_with_warmup()</a> controlled by <code>args</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.preprocess_logits_for_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.preprocess_logits_for_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>preprocess_logits_for_metrics</strong> (<code>Callable[[torch.Tensor, torch.Tensor], torch.Tensor]</code>, <em>optional</em>) &#x2014; A function that preprocess the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. The modifications made by this function will be reflected in the predictions received by <code>compute_metrics</code>.</p> <p>Note that the labels (second parameter) will be <code>None</code> if the dataset does not have them.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.</p> <p>Important attributes:</p> <ul><li><strong>model</strong> — Always points to the core model. If using a transformers model, it will be a <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> subclass.</li> <li><strong>model_wrapped</strong> — Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under <code>DeepSpeed</code>, the inner model is wrapped in <code>DeepSpeed</code> and then again in <code>torch.nn.DistributedDataParallel</code>. 
If the inner model hasn’t been wrapped, then <code>self.model_wrapped</code> is the same as <code>self.model</code>.</li> <li><strong>is_model_parallel</strong> — Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).</li> <li><strong>place_model_on_device</strong> — Whether or not to automatically place the model on the device - it will be set to <code>False</code> if model parallel or deepspeed is used, or if the default <code>TrainingArguments.place_model_on_device</code> is overridden to return <code>False</code> .</li> <li><strong>is_in_train</strong> — Whether or not a model is currently running <code>train</code> (e.g. when <code>evaluate</code> is called while in <code>train</code>)</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.add_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.add_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.add_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L664" 
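<p>Putting the arguments above together, here is a minimal sketch of wiring up a <code>Trainer</code> for sequence classification. The checkpoint name, the <code>train_dataset</code>/<code>eval_dataset</code> variables and the accuracy metric are illustrative assumptions, not part of this docstring:</p>
<pre><code class="language-python">import numpy as np
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=3, per_device_train_batch_size=16)

def compute_metrics(eval_pred):
    # eval_pred is an EvalPrediction with .predictions and .label_ids
    preds = np.argmax(eval_pred.predictions, axis=-1)
    return {"accuracy": float((preds == eval_pred.label_ids).mean())}

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,  # assumed: already tokenized datasets
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,          # enables dynamic padding via DataCollatorWithPadding
    compute_metrics=compute_metrics,
)
trainer.train()
</code></pre>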
<h4 id="transformers.Trainer.add_callback">add_callback</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L664" target="_blank">&lt; source &gt;</a></p> <p><code>( callback )</code></p> <p>Parameters:</p> <ul><li><strong>callback</strong> (<code>type</code> or <code>~transformers.TrainerCallback</code>) &#x2014; A <code>~transformers.TrainerCallback</code> class or an instance of a <code>~transformers.TrainerCallback</code>. In the first case, a member of that class will be instantiated.</li></ul> <p>Add a callback to the current list of <code>~transformers.TrainerCallback</code>.</p>
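<p>A short sketch of registering a callback with <code>add_callback</code>. The <code>PrintEpochCallback</code> below is a made-up example, not a built-in callback, and <code>trainer</code> is assumed to be an existing <code>Trainer</code> instance:</p>
<pre><code class="language-python">from transformers import TrainerCallback

class PrintEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        # state.epoch is a float tracking training progress
        print(f"finished epoch {state.epoch}")

trainer.add_callback(PrintEpochCallback)    # pass the class: Trainer instantiates it
trainer.add_callback(PrintEpochCallback())  # or pass an instance directly
</code></pre>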
<h4 id="transformers.Trainer.autocast_smart_context_manager">autocast_smart_context_manager</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2441" target="_blank">&lt; source &gt;</a></p> <p><code>( )</code></p> <p>A helper wrapper that creates an appropriate context manager for <code>autocast</code> while feeding it the desired arguments, depending on the situation.</p>
<h4 id="transformers.Trainer.compute_loss">compute_loss</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2508" target="_blank">&lt; source &gt;</a></p> <p><code>( model, inputs, return_outputs = False )</code></p> <p>How the loss is computed by Trainer. By default, all models return the loss in the first element.</p> <p>Subclass and override for custom behavior.</p>
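<p>A minimal sketch of the "subclass and override" pattern for <code>compute_loss</code>. The class-weighted cross-entropy below is purely illustrative and assumes a two-label classification model:</p>
<pre><code class="language-python">import torch
from transformers import Trainer

class WeightedLossTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        logits = outputs.logits
        # illustrative: replace the model's default loss with a class-weighted one
        loss_fct = torch.nn.CrossEntropyLoss(
            weight=torch.tensor([1.0, 2.0], device=logits.device)
        )
        loss = loss_fct(logits.view(-1, model.config.num_labels), labels.view(-1))
        return (loss, outputs) if return_outputs else loss
</code></pre>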
<h4 id="transformers.Trainer.compute_loss_context_manager">compute_loss_context_manager</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2424" target="_blank">&lt; source &gt;</a></p> <p><code>( )</code></p> <p>A helper wrapper to group together context managers.</p>
<h4 id="transformers.Trainer.create_model_card">create_model_card</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3292" target="_blank">&lt; source &gt;</a></p> <p><code>( language: typing.Optional[str] = None, license: typing.Optional[str] = None, tags: typing.Union[str, typing.List[str], NoneType] = None, model_name: typing.Optional[str] = None, finetuned_from: typing.Optional[str] = None, tasks: typing.Union[str, typing.List[str], NoneType] = None, dataset_tags: typing.Union[str, typing.List[str], NoneType] = None, dataset: typing.Union[str, typing.List[str], NoneType] = None, dataset_args: typing.Union[str, typing.List[str], NoneType] = None )</code></p> <p>Parameters:</p> <ul><li><strong>language</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the model (if applicable).</li> <li><strong>license</strong> (<code>str</code>, <em>optional</em>) &#x2014; The license of the model. Will default to the license of the pretrained model used, if the original model given to the <code>Trainer</code> comes from a repo on the Hub.</li> <li><strong>tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; Some tags to be included in the metadata of the model card.</li> <li><strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model.</li> <li><strong>finetuned_from</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the <code>Trainer</code> (if it comes from the Hub).</li> <li><strong>tasks</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several task identifiers, to be included in the metadata of the model card.</li> <li><strong>dataset_tags</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset tags, to be included in the metadata of the model card.</li> <li><strong>dataset</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset identifiers, to be included in the metadata of the model card.</li> <li><strong>dataset_args</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; One or several dataset arguments, to be included in the metadata of the model card.</li></ul> <p>Creates a draft of a model card using the information available to the <code>Trainer</code>.</p>
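<p>A sketch of drafting a model card after training; all argument values below are illustrative, and <code>trainer</code> is assumed to be the instance built in the constructor example above:</p>
<pre><code class="language-python">trainer.create_model_card(
    language="en",
    license="apache-2.0",
    model_name="my-finetuned-model",
    finetuned_from="bert-base-uncased",
    tasks="text-classification",
    dataset="glue",
    dataset_args="mrpc",
)
# the draft is written as a README.md in the Trainer's output directory
</code></pre>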
<h4 id="transformers.Trainer.create_optimizer">create_optimizer</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1024" target="_blank">&lt; source &gt;</a></p> <p><code>( )</code></p> <p>Set up the optimizer.</p> <p>We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through <code>optimizers</code>, or subclass and override this method.</p>
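<p>A sketch of the subclassing route, assuming plain SGD is wanted instead of the default optimizer:</p>
<pre><code class="language-python">import torch
from transformers import Trainer

class SGDTrainer(Trainer):
    def create_optimizer(self):
        # Trainer stores the optimizer on self.optimizer; only build it once
        if self.optimizer is None:
            self.optimizer = torch.optim.SGD(
                self.model.parameters(), lr=self.args.learning_rate, momentum=0.9
            )
        return self.optimizer
</code></pre>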
<h4 id="transformers.Trainer.create_optimizer_and_scheduler">create_optimizer_and_scheduler</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1008" target="_blank">&lt; source &gt;</a></p> <p><code>( num_training_steps: int )</code></p> <p>Set up the optimizer and the learning rate scheduler.</p> <p>We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through <code>optimizers</code>, or subclass and override this method (or <code>create_optimizer</code> and/or <code>create_scheduler</code>) in a subclass.</p>
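<p>Alternatively, as both docstrings mention, a ready-made optimizer and scheduler can be passed through the <code>optimizers</code> argument of the constructor. A sketch, reusing the <code>model</code>, <code>args</code> and <code>train_dataset</code> variables from the constructor example above; the step counts are illustrative:</p>
<pre><code class="language-python">import torch
from transformers import Trainer, get_linear_schedule_with_warmup

optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
num_training_steps = 1000  # must match what the training loop will actually run
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=num_training_steps
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    optimizers=(optimizer, scheduler),
)
</code></pre>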
<h4 id="transformers.Trainer.create_scheduler">create_scheduler</h4> <p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1132" target="_blank">&lt; source &gt;</a></p> <p><code>( num_training_steps: int, optimizer: Optimizer = None )</code></p> <p>Parameters:</p> <ul><li><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The number of training steps to do.</li></ul> <p>Set up the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.</p>
#### evaluate

`(eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval")`

Parameters:

- **eval_dataset** (`Dataset`, *optional*): Pass a dataset if you wish to override `self.eval_dataset`. If it is a [Dataset](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset), columns not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` method.
- **ignore_keys** (`List[str]`, *optional*): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.
- **metric_key_prefix** (`str`, *optional*, defaults to `"eval"`): An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named "eval_bleu" if the prefix is "eval" (the default).

Run evaluation and return metrics.

The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `compute_metrics` argument).

You can also subclass and override this method to inject custom behavior.
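A sketch of the usual pattern: a `compute_metrics` function passed at init determines which metrics `evaluate` reports, and `metric_key_prefix` controls how their keys are named. Here `model`, `eval_dataset`, and `test_dataset` are placeholders for a standard fine-tuning setup, not objects defined by this example.

```python
import numpy as np
from transformers import Trainer, TrainingArguments

def compute_metrics(eval_pred):
    # eval_pred is an EvalPrediction with .predictions and .label_ids
    logits, labels = eval_pred.predictions, eval_pred.label_ids
    preds = np.argmax(logits, axis=-1)
    return {"accuracy": float((preds == labels).mean())}

trainer = Trainer(
    model=model,                    # assumed: a classification model
    args=TrainingArguments(output_dir="out"),
    eval_dataset=eval_dataset,      # assumed: a tokenized dataset with labels
    compute_metrics=compute_metrics,
)

metrics = trainer.evaluate()        # keys like "eval_loss", "eval_accuracy", ...
test_metrics = trainer.evaluate(eval_dataset=test_dataset, metric_key_prefix="test")
```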
dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">description<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prediction/evaluation loop, shared by <code>Trainer.evaluate()</code> and <code>Trainer.predict()</code>.</p> <p>Works both with or without labels.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.floating_point_ops"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>floating_point_ops</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.floating_point_ops" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.floating_point_ops"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
#### floating_point_ops

`(inputs: Dict[str, Union[torch.Tensor, Any]]) → int`

Parameters:

- **inputs** (`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model.

Returns `int`: The number of floating-point operations.

For models that inherit from `PreTrainedModel`, uses that method to compute the number of floating-point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method.
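A sketch of the first option for a plain PyTorch module: give the model its own `floating_point_ops` so the Trainer's FLOP accounting keeps working. The model, the `"features"` input key, and the "6 × parameters × batch size" estimate are illustrative assumptions, not values prescribed by the library.

```python
import torch
import torch.nn as nn

class TinyRegressor(nn.Module):
    """A plain PyTorch module (not a PreTrainedModel), so it supplies its own FLOP estimate."""

    def __init__(self, hidden=128):
        super().__init__()
        self.body = nn.Sequential(nn.Linear(16, hidden), nn.ReLU(), nn.Linear(hidden, 1))

    def forward(self, features, labels=None):
        preds = self.body(features).squeeze(-1)
        loss = None if labels is None else nn.functional.mse_loss(preds, labels)
        return {"loss": loss, "logits": preds}

    def floating_point_ops(self, inputs):
        # Very rough estimate: ~6 * parameter count * batch size per forward + backward pass.
        n_params = sum(p.numel() for p in self.parameters())
        batch_size = inputs["features"].shape[0] if "features" in inputs else 1
        return 6 * n_params * batch_size
```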
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.get_eval_dataloader.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.get_eval_dataloader.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; If provided, will override <code>self.eval_dataset</code>. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement <code>__len__</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Returns the evaluation <code>~torch.utils.data.DataLoader</code>.</p> <p>Subclass and override this method if you want to inject some custom behavior.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_optimizer_cls_and_kwargs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_optimizer_cls_and_kwargs</span></h4><!-- 
#### get_optimizer_cls_and_kwargs

`(args: TrainingArguments)`

Parameters:

- **args** (`transformers.training_args.TrainingArguments`): The training arguments for the training session.

Returns the optimizer class and optimizer parameters based on the training arguments.
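Since the method only takes the training arguments, it can be called on the class to inspect which optimizer a given configuration maps to. A small sketch; the printed values are indicative of what to expect, not guaranteed output:

```python
from transformers import Trainer, TrainingArguments

args = TrainingArguments(output_dir="out", optim="adamw_torch", learning_rate=3e-5)

optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
print(optimizer_cls)     # e.g. <class 'torch.optim.adamw.AdamW'>
print(optimizer_kwargs)  # e.g. {'lr': 3e-05, 'betas': (0.9, 0.999), 'eps': 1e-08}
```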
id="transformers.Trainer.get_test_dataloader"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_test_dataloader</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_test_dataloader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.get_test_dataloader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L960" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: Dataset</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.get_test_dataloader.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Trainer.get_test_dataloader.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The test dataset to use. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement <code>__len__</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Returns the test <code>~torch.utils.data.DataLoader</code>.</p> <p>Subclass and override this method if you want to inject some custom behavior.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_train_dataloader"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_train_dataloader</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_train_dataloader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.get_train_dataloader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 
#### get_train_dataloader

`( )`

Returns the training `torch.utils.data.DataLoader`.

Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed training if necessary) otherwise.

Subclass and override this method if you want to inject some custom behavior.
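A common reason to override it is to plug in a custom sampler, for example to oversample rare classes. A sketch under the assumption that the training dataset exposes a `"label"` column (the class name and column name are illustrative):

```python
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler
from transformers import Trainer

class OversamplingTrainer(Trainer):
    def get_train_dataloader(self):
        # Assumes the train_dataset exposes a "label" column; adjust for your data.
        labels = torch.tensor(self.train_dataset["label"])
        class_counts = torch.bincount(labels)
        weights = 1.0 / class_counts[labels].float()
        sampler = WeightedRandomSampler(weights, num_samples=len(weights), replacement=True)
        return DataLoader(
            self.train_dataset,
            batch_size=self.args.train_batch_size,
            sampler=sampler,
            collate_fn=self.data_collator,
        )
```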
#### hyperparameter_search

`(hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: str = "minimize", backend: Optional[Union[str, HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs) → trainer_utils.BestRun`

Parameters:

- **hp_space** (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): A function that defines the hyperparameter search space. Will default to `default_hp_space_optuna()`, `default_hp_space_ray()`, or `default_hp_space_sigopt()` depending on your backend.
- **compute_objective** (`Callable[[Dict[str, float]], float]`, *optional*): A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` method. Will default to `default_compute_objective()`.
- **n_trials** (`int`, *optional*, defaults to 20): The number of trial runs to test.
- **direction** (`str`, *optional*, defaults to `"minimize"`): Whether to optimize for a greater or lower objective. Can be `"minimize"` or `"maximize"`; pick `"minimize"` when optimizing the validation loss and `"maximize"` when optimizing one or several metrics.
- **backend** (`str` or `HPSearchBackend`, *optional*): The backend to use for hyperparameter search. Will default to optuna, Ray Tune, or SigOpt, depending on which one is installed. If all are installed, will default to optuna.
- **hp_name** (`Callable[["optuna.Trial"], str]`, *optional*): A function that defines the trial/run name. Will default to None.
- **kwargs** (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more information see:
  - the documentation of [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
  - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
  - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)

Returns `trainer_utils.BestRun`: All the information about the best run.

Launch a hyperparameter search using `optuna`, `Ray Tune`, or `SigOpt`. The optimized quantity is determined by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, and the sum of all metrics otherwise.

> To use this method, you need to have provided a `model_init` when initializing your `Trainer`: we need to reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to subclass `Trainer` and override the method `create_optimizer_and_scheduler()` for a custom optimizer/scheduler.
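A sketch of a search with the optuna backend. `model_init`, the checkpoint, the search space values, and the datasets are illustrative choices for this example; the essential point is that the model is built by `model_init` (instead of being passed through `model=`) so it can be recreated for every trial.

```python
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

def model_init():
    # Re-created from scratch for every trial.
    return AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

def hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32]
        ),
    }

trainer = Trainer(
    model_init=model_init,        # required instead of model=
    args=TrainingArguments(output_dir="out", evaluation_strategy="epoch"),
    train_dataset=train_dataset,  # assumed: tokenized datasets
    eval_dataset=eval_dataset,
)

best_run = trainer.hyperparameter_search(
    hp_space=hp_space,
    backend="optuna",
    n_trials=10,
    direction="minimize",         # minimize the evaluation loss
)
print(best_run.hyperparameters)
```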
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.init_git_repo.at_init" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.init_git_repo.at_init"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>at_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether this function is called before any training or not. If <code>self.args.overwrite_output_dir</code> is <code>True</code> and <code>at_init</code> is <code>True</code>, the path to the repo (which is <code>self.args.output_dir</code>) might be wiped out.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Initializes a git repo in <code>self.args.hub_model_id</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.is_local_process_zero"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>is_local_process_zero</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.is_local_process_zero" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.is_local_process_zero"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2540" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.is_world_process_zero"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>is_world_process_zero</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.is_world_process_zero" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.is_world_process_zero"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2547" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.log"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>log</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.log" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.log"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2373" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logs<span class="opacity-60">: typing.Dict[str, float]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.log.logs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.log.logs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Log <code>logs</code> on the various objects watching training.</p> <p>Subclass and override this method to inject custom behavior.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.log_metrics"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>log_metrics</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.log_metrics" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.log_metrics"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L874" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">split<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.log_metrics.split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.log_metrics.split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.log_metrics.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.log_metrics.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Log metrics in a specially formatted way.</p> <p>In a distributed environment this is done only for the process with rank 0.</p> <p>Notes on memory reports:</p> <p>In order to get a memory usage report, you need to install <code>psutil</code>. 
You can do that with <code>pip install psutil</code>.</p> <div class="relative group rounded-md"><a id="transformers.Trainer.log_metrics.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.log_metrics.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Now when this method is run, you will see a report that will include: :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-attr">init_mem_cpu_alloc_delta</span> = <span class="hljs-number">1301</span>MB <span class="hljs-attr">init_mem_cpu_peaked_delta</span> = <span class="hljs-number">154</span>MB <span class="hljs-attr">init_mem_gpu_alloc_delta</span> = <span class="hljs-number">230</span>MB <span class="hljs-attr">init_mem_gpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_cpu_alloc_delta</span> = <span class="hljs-number">1345</span>MB <span class="hljs-attr">train_mem_cpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_gpu_alloc_delta</span> = <span class="hljs-number">693</span>MB <span class="hljs-attr">train_mem_gpu_peaked_delta</span> = <span class="hljs-number">7</span>MB<!-- HTML_TAG_END --></pre></div></div> <p><strong>Understanding the reports:</strong></p> <ul><li>the first segment, e.g., <code>train__</code>, tells you which stage the metrics are for. Reports starting with <code>init_</code> will be added to the first stage that gets run. 
So if only evaluation is run, the memory usage for the <code>__init__</code> will be reported along with the <code>eval_</code> metrics.</li> <li>the third segment is either <code>cpu</code> or <code>gpu</code> and tells you whether it’s the general RAM or the gpu0 memory metric.</li> <li><code>*_alloc_delta</code> is the difference in the used/allocated memory counter between the end and the start of the stage; it can be negative if a function released more memory than it allocated.</li> <li><code>*_peaked_delta</code> is any extra memory that was consumed and then freed, relative to the current allocated memory counter; it is never negative. When you look at the metrics of any stage, you add up <code>alloc_delta</code> + <code>peaked_delta</code> to know how much memory was needed to complete that stage.</li></ul> <p>The reporting happens only for the process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of the work, but that may not hold if model parallelism is used, in which case other GPUs may use a different amount of gpu memory. This is also not the case under DataParallel, where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUs. Perhaps in the future these reports will evolve to measure those too.</p> <p>The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise.</p> <p>The CPU peak memory is measured using a sampling thread. Due to python’s GIL it may miss some of the peak memory if that thread didn’t get a chance to run when the highest memory was used. Therefore this report can understate the real peak. Using <code>tracemalloc</code> would have reported the exact peak memory, but it doesn’t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won’t be reported. It was therefore dropped in favor of the memory sampling approach, which reads the current process memory usage.</p> <p>The GPU allocated and peak memory reporting is done with <code>torch.cuda.memory_allocated()</code> and <code>torch.cuda.max_memory_allocated()</code>. This metric reports only “deltas” for pytorch-specific allocations, as the <code>torch.cuda</code> memory management system doesn’t track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.</p> <p>Note that this tracker doesn’t account for memory allocations outside of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>’s <code>__init__</code>, <code>train</code>, <code>evaluate</code> and <code>predict</code> calls.</p> <p>Because <code>evaluation</code> calls may happen during <code>train</code>, we can’t handle nested invocations: <code>torch.cuda.max_memory_allocated</code> is a single counter, so if it gets reset by a nested eval call, <code>train</code>’s tracker will report incorrect info. If this <a href="https://github.com/pytorch/pytorch/issues/16266" rel="nofollow">pytorch issue</a> gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of the <code>train</code>, <code>evaluate</code> and <code>predict</code> methods. 
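</p> <p>For orientation, here is a minimal usage sketch; it assumes <code>trainer</code> is an already-initialized <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> with its datasets configured, and simply shows where <code>log_metrics</code> and the related <code>save_metrics</code> fit after training and evaluation:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->train_result = trainer.train()
metrics = train_result.metrics

# pretty-print the metrics, including any memory deltas collected as described above
trainer.log_metrics("train", metrics)
# optionally persist them to a JSON file in the output directory
trainer.save_metrics("train", metrics)

eval_metrics = trainer.evaluate()
trainer.log_metrics("eval", eval_metrics)<!-- HTML_TAG_END --></pre></div> <p>As noted above, only the outer-level call is tracked for memory. 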
Which means that if <code>eval</code> is called during <code>train</code>, it’s the latter that will account for its memory usage and that of the former.</p> <p>This also means that if any other tool that is used along the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> calls <code>torch.cuda.reset_peak_memory_stats</code>, the gpu peak memory stats could be invalid. And the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will disrupt the normal behavior of any such tools that rely on calling <code>torch.cuda.reset_peak_memory_stats</code> themselves.</p> <p>For best performance you may want to consider turning the memory profiling off for production runs.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.metrics_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>metrics_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.metrics_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.metrics_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L848" target="_blank"><span>&lt;</span> <span 
class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60">: typing.Dict[str, float]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>metrics (<code>Dict[str, float]</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.metrics_format.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.metrics_format.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.Trainer.metrics_format.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>metrics (<code>Dict[str, float]</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The reformatted metrics</p> <!-- HTML_TAG_END --></p> </div></div> <p>Reformat Trainer metrics values to a human-readable format</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.num_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 
14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.num_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.num_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1149" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Helper to get number of samples in a <code>~torch.utils.data.DataLoader</code> by accessing its dataset. 
When dataloader.dataset does not exist or has no length, estimates as best it can</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.pop_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>pop_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.pop_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.pop_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L675" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callback<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>~transformer.TrainerCallback</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.pop_callback.callback" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.pop_callback.callback"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callback</strong> (<code>type</code> or <code>~transformer.TrainerCallback</code>) &#x2014; A <code>~transformer.TrainerCallback</code> class or an instance of a <code>~transformer.TrainerCallback</code>. In the first case, will pop the first member of that class found in the list of callbacks.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.Trainer.pop_callback.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>~transformer.TrainerCallback</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The callback removed, if found.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Remove a callback from the current list of <code>~transformer.TrainerCallback</code> and returns it.</p> <p>If the callback is not found, returns <code>None</code> (and no error is raised).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 
11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>predict</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2806" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: Dataset</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;test&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.predict.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.predict.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.predict.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;test&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;test_bleu&#x201D; if the prefix is &#x201C;test&#x201D; (default)<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run prediction and returns predictions and potential metrics.</p> <p>Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in <code>evaluate()</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If your predictions or labels have different sequence length (for instance because you’re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.</p></div> <p>Returns: <em>NamedTuple</em> A namedtuple with the following keys:</p> <ul><li>predictions (<code>np.ndarray</code>): The predictions on <code>test_dataset</code>.</li> <li>label_ids (<code>np.ndarray</code>, <em>optional</em>): The labels (if the dataset contained some).</li> <li>metrics (<code>Dict[str, float]</code>, <em>optional</em>): The potential dictionary of metrics (if the dataset contained labels).</li></ul></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.prediction_loop"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prediction_loop</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.prediction_loop" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.prediction_loop"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3449" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">description<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prediction/evaluation loop, shared by <code>Trainer.evaluate()</code> and <code>Trainer.predict()</code>.</p> <p>Works both with or without labels.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.prediction_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prediction_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.prediction_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.prediction_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3126" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to evaluate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. 
Check your model&#x2019;s documentation for all accepted arguments.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>) &#x2014; Whether or not to return the loss only.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.Trainer.prediction_step.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tuple with the loss, logits and labels (each being optional).</p> <!-- HTML_TAG_END --></p> </div></div> <p>Perform an evaluation step on <code>model</code> using <code>inputs</code>.</p>
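<p>For instance, a minimal sketch of wrapping the default implementation in a subclass (the <code>MyTrainer</code> name is purely illustrative):</p> <pre><code class="language-python">from transformers import Trainer

class MyTrainer(Trainer):
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        # Reuse the default logic, then post-process the returned tensors if needed.
        loss, logits, labels = super().prediction_step(
            model, inputs, prediction_loss_only, ignore_keys=ignore_keys
        )
        return loss, logits, labels</code></pre> <p>Subclass and override to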
inject custom behavior.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L3390" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = &#39;End of training&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">blocking<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;End of training&quot;</code>) &#x2014; Message to commit while pushing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.push_to_hub.blocking" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.push_to_hub.blocking"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>blocking</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the function should return only when the <code>git push</code> has finished. 
kwargs &#x2014; Additional keyword arguments passed along to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.create_model_card">create_model_card()</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload <em>self.model</em> and <em>self.tokenizer</em> to the 🤗 model hub on the repo <em>self.args.hub_model_id</em>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.remove_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>remove_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.remove_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.remove_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L691" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callback<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 
relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.remove_callback.callback" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.remove_callback.callback"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callback</strong> (<code>type</code> or <code>~transformer.TrainerCallback</code>) &#x2014; A <code>~transformer.TrainerCallback</code> class or an instance of a <code>~transformer.TrainerCallback</code>. In the first case, will remove the first member of that class found in the list of callbacks.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Remove a callback from the current list of <code>~transformer.TrainerCallback</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.save_metrics"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_metrics</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.save_metrics" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.save_metrics"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L964" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">split<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">combined<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.save_metrics.split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.save_metrics.split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>, <code>all</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Trainer.save_metrics.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.save_metrics.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.save_metrics.combined" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.save_metrics.combined"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>combined</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Creates combined metrics by updating <code>all_results.json</code> with metrics of this call<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save metrics into a json file for that split, e.g. <code>train_results.json</code>.</p> <p>Under distributed environment this is done only for a process with rank 0.</p> <p>To understand the metrics please read the docstring of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.log_metrics">log_metrics()</a>. 
The only difference is that raw unformatted numbers are saved in the current method.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.save_model"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_model</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.save_model" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.save_model"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2559" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">_internal_call<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Will save the model, 
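so you can reload it using <code>from_pretrained()</code>.</p> <p>Will only save from the main process.</p> <p>A minimal usage sketch (assuming <code>trainer</code> is an existing <code>Trainer</code> instance; the output path is only illustrative):</p> <pre><code class="language-python">from transformers import AutoModel

# Save the current model (and the tokenizer, if one was passed to the Trainer).
trainer.save_model("./my_finetuned_model")

# The saved directory can later be reloaded with from_pretrained().
model = AutoModel.from_pretrained("./my_finetuned_model")</code></pre></div>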
<div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.save_state"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_state</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.save_state" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.save_state"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L1002" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Saves the Trainer state, since Trainer.save_model saves only the model and the tokenizer.</p> <p>Under distributed environment this is done only for a process with rank 0.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.Trainer.torchdynamo_smart_context_manager"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torchdynamo_smart_context_manager</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.torchdynamo_smart_context_manager" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.torchdynamo_smart_context_manager"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2435" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A helper wrapper that creates an appropriate context manager for <code>torchdynamo</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.train"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.train" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.train"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L1421" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Union[str, bool, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial<span class="opacity-60">: typing.Union[ForwardRef(&#39;optuna.Trial&#39;), typing.Dict[str, typing.Any]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys_for_eval<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code> or <code>bool</code>, <em>optional</em>) &#x2014; If a <code>str</code>, local path to a saved checkpoint as saved by a previous instance of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If a <code>bool</code> and equals <code>True</code>, load the last checkpoint in <em>args.output_dir</em> as saved by a previous instance of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If present, training will resume from the model/optimizer/scheduler states loaded here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.trial" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train.trial"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trial</strong> (<code>optuna.Trial</code> or <code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The trial run or the hyperparameter dictionary for hyperparameter search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.ignore_keys_for_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Trainer.train.ignore_keys_for_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys_for_eval</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. kwargs &#x2014; Additional keyword arguments used to hide deprecated arguments<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Main training entry point.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.training_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>training_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.training_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.training_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer.py#L2460" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.training_step.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.training_step.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to train.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.training_step.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.training_step.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. Check your model&#x2019;s documentation for all accepted arguments.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.Trainer.training_step.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tensor with training loss on this batch.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Perform a training step on a batch of inputs.</p> <p>Subclass and override to inject custom behavior.</p></div></div> <h2 class="relative group"><a id="transformers.Seq2SeqTrainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqTrainer </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Seq2SeqTrainer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L30" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_collator<span class="opacity-60">: typing.Optional[DataCollator] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_init<span class="opacity-60">: typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">compute_metrics<span class="opacity-60">: typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callbacks<span class="opacity-60">: typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizers<span class="opacity-60">: typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">preprocess_logits_for_metrics<span class="opacity-60">: typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer.evaluate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>evaluate</span></h4><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainer.evaluate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer.evaluate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**gen_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement the <code>__len__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. 
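<p>For illustration, a minimal sketch of forwarding these generation settings to <code>evaluate()</code> (this is an assumed usage example, not part of the reference: <code>model</code>, <code>tokenizer</code>, <code>train_ds</code> and <code>eval_ds</code> are hypothetical objects defined elsewhere):</p> <pre><code class="language-python">from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments

# Hypothetical setup; model, tokenizer, train_ds and eval_ds are placeholders.
args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
)

# max_length and num_beams are forwarded to generate() during evaluation
metrics = trainer.evaluate(max_length=64, num_beams=4)
print(metrics)  # keys are prefixed with "eval", e.g. "eval_loss"
</code></pre>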
gen_kwargs &#x2014; Additional <code>generate</code> specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run evaluation and returns metrics.</p> <p>The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init <code>compute_metrics</code> argument).</p> <p>You can also subclass and override this method to inject custom behavior.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer.predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>predict</span></h4><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainer.predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer.predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_seq2seq.py#L80" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: 
Dataset</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;test&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**gen_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is a <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset" rel="nofollow">Dataset</a>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
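<p>As a rough usage sketch (reusing the hypothetical <code>trainer</code> from the evaluate example above; <code>test_ds</code> is a placeholder dataset), <code>predict()</code> returns a namedtuple whose fields are described below:</p> <pre><code class="language-python"># Hypothetical sketch; trainer and test_ds are assumed to be defined elsewhere.
out = trainer.predict(test_ds, max_length=64, num_beams=4)

print(out.metrics)      # e.g. {"test_loss": ..., "test_runtime": ...}
print(out.predictions)  # np.ndarray of generated token ids
if out.label_ids is not None:
    print(out.label_ids)  # present only when the dataset contains labels
</code></pre>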
It must implement the <code>__len__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;test&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example, the metrics &#x201C;bleu&#x201D; will be named &#x201C;test_bleu&#x201D; if the prefix is <code>&quot;test&quot;</code> (default).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search. <strong>gen_kwargs</strong> &#x2014; Additional <code>generate</code>-specific keyword arguments.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run prediction and returns predictions and potential metrics.</p> <p>Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in <code>evaluate()</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If your predictions or labels have different sequence lengths (for instance because you’re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.</p></div> <p>Returns: <em>NamedTuple</em> A namedtuple with the following keys:</p> <ul><li>predictions (<code>np.ndarray</code>): The predictions on <code>test_dataset</code>.</li> <li>label_ids (<code>np.ndarray</code>, <em>optional</em>): The labels (if the dataset contained some).</li> <li>metrics (<code>Dict[str, float]</code>, <em>optional</em>): The potential dictionary of metrics (if the dataset contained labels).</li></ul></div></div> <h2 class="relative group"><a id="transformers.TrainingArguments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainingArguments </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">TrainingArguments</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L121" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite_output_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_predict<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">evaluation_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;no&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_train_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_eval_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_train_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_eval_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_accumulation_steps<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_accumulation_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_delay<span class="opacity-60">: typing.Optional[float] = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: float = 5e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_grad_norm<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: float = 3.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler_type<span class="opacity-60">: typing.Union[transformers.trainer_utils.SchedulerType, str] = &#39;linear&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_ratio<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">log_level_replica<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_on_each_node<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_dir<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_first_step<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_nan_inf_filter<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_total_limit<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_on_each_node<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_cuda<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_mps_device<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 42</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_seed<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">jit_mode_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_ipex<span class="opacity-60">: bool = 
False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_opt_level<span class="opacity-60">: str = &#39;O1&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">half_precision_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf32<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">xpu_backend<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_num_cores<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_metrics_debug<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">debug<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_drop_last<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_num_workers<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_index<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_name<span class="opacity-60">: typing.Optional[str] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">disable_tqdm<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_unused_columns<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_names<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">load_best_model_at_end<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_for_best_model<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">greater_is_better<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_data_skip<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sharded_ddp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp_min_num_params<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp_transformer_layer_cls_to_wrap<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">deepspeed<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_smoothing_factor<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optim<span class="opacity-60">: typing.Union[transformers.training_args.OptimizerNames, str] = &#39;adamw_hf&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adafactor<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">group_by_length<span class="opacity-60">: bool = False</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_column_name<span class="opacity-60">: typing.Optional[str] = &#39;length&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">report_to<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_find_unused_parameters<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_bucket_cap_mb<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_pin_memory<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_memory_metrics<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_legacy_prediction_loop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.HubStrategy, str] = &#39;every_save&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_private_repo<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_checkpointing<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_inputs_for_metrics<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mp_parameters<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_find_batch_size<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">full_determinism<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">torchdynamo<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ray_scope<span class="opacity-60">: typing.Optional[str] = &#39;last&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_timeout<span class="opacity-60">: typing.Optional[int] = 1800</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.overwrite_output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.overwrite_output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. Use this to continue training if <code>output_dir</code> points to a checkpoint directory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_train" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_train"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_predict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_predict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.evaluation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.evaluation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.TrainingArguments.per_device_train_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.per_device_train_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.per_device_eval_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.per_device_eval_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.gradient_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.gradient_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.eval_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.eval_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.eval_delay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.eval_delay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_delay</strong> (<code>float</code>, <em>optional</em>) &#x2014; Number of epochs or steps to wait for before the first evaluation can be performed, depending on the evaluation_strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_beta2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.max_grad_norm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.max_grad_norm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.num_train_epochs(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainingArguments.num_train_epochs(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.lr_scheduler_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.lr_scheduler_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. See the documentation of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.warmup_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.warmup_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.TrainingArguments.warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. 
Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_level_replica" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_level_replica"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainingArguments.logging_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_first_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_first_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_nan_inf_filter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_nan_inf_filter"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_total_limit" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_total_limit"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.no_cuda" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.no_cuda"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>~Trainer.model_init</code> function to instantiate the model if it has some randomly initialized parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.data_seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.data_seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. 
This can be used to ensure reproducibility of data sampling, independent of the model seed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.jit_mode_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.jit_mode_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>jit_mode_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use PyTorch jit trace for inference.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.use_ipex" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.use_ipex"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_ipex</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Intel extension for PyTorch when it is available. 
<a href="https://github.com/intel/intel-extension-for-pytorch" rel="nofollow">IPEX installation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.bf16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.bf16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_opt_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_opt_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. 
Use <code>half_precision_backend</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.half_precision_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.half_precision_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;, &quot;cuda_amp&quot;, &quot;apex&quot;, &quot;cpu_amp&quot;</code>. <code>&quot;auto&quot;</code> will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.bf16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.bf16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.tf32" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.tf32"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch&#x2019;s version default of <code>torch.backends.cuda.matmul.allow_tf32</code>. For more details please refer to the <a href="https://huggingface.co/docs/transformers/performance#tf32" rel="nofollow">TF32</a> documentation. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.xpu_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.xpu_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. 
Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.tpu_num_cores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.tpu_num_cores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_drop_last" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_drop_last"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.eval_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.eval_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. Will default to the same value as <code>logging_steps</code> if not set.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 
0 means that the data will be loaded in the main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.past_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.past_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.run_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.run_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. 
Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.disable_tqdm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.disable_tqdm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>~notebook.NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.remove_unused_columns" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.remove_unused_columns"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code> yet.)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.label_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.label_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.load_best_model_at_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.load_best_model_at_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>evaluation_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.metric_for_best_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.metric_for_best_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.greater_is_better" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.greater_is_better"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ignore_data_skip" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ignore_data_skip"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.sharded_ddp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.sharded_ddp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). 
This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fsdp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fsdp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fsdp</strong> (<code>bool</code>, <code>str</code> or list of <code>FSDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use PyTorch Distributed Parallel Training (in distributed training only).</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;full_shard&quot;</code>: Shard parameters, gradients and optimizer states.</li> <li><code>&quot;shard_grad_op&quot;</code>: Shard optimizer states and gradients.</li> <li><code>&quot;offload&quot;</code>: Offload parameters and gradients to CPUs (only compatible with <code>&quot;full_shard&quot;</code> and <code>&quot;shard_grad_op&quot;</code>).</li> <li><code>&quot;auto_wrap&quot;</code>: Automatically recursively wrap layers with FSDP using <code>default_auto_wrap_policy</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fsdp_min_num_params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fsdp_min_num_params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fsdp_min_num_params</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; FSDP&#x2019;s minimum number of parameters for Default Auto Wrapping. (useful only when <code>fsdp</code> field is passed).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.deepspeed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.deepspeed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. 
The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.label_smoothing_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.label_smoothing_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.debug" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.debug"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. 
This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.optim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.optim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code>, <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adafactor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adafactor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. 
Use <code>--optim adafactor</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.group_by_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.group_by_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). Only useful if applying dynamic padding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.length_column_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.length_column_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. 
Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.report_to" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.report_to"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;neptune&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ddp_find_unused_parameters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ddp_find_unused_parameters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. 
Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ddp_bucket_cap_mb" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ddp_bucket_cap_mb"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_pin_memory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_pin_memory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. 
Will default to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.skip_memory_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.skip_memory_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. If this is activated, <code>output_dir</code> will begin a git directory synced with the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). 
Calling <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to the name of <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code>, <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. 
Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. 
Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_private_repo" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_private_repo"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_private_repo</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, the Hub repo will be set to private.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.gradient_checkpointing" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.gradient_checkpointing"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.include_inputs_for_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.include_inputs_for_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_inputs_for_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the inputs will be passed to the <code>compute_metrics</code> function. This is intended for metrics that need inputs, predictions and references for scoring calculation in Metric class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.auto_find_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.auto_find_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_find_batch_size</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. 
Requires accelerate to be installed (<code>pip install accelerate</code>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.full_determinism" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.full_determinism"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>full_determinism</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.enable_full_determinism">enable_full_determinism()</a> is called instead of <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.set_seed">set_seed()</a> to ensure reproducible results in distributed training<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.torchdynamo" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.torchdynamo"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torchdynamo</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token that is used to set the backend compiler for TorchDynamo. Possible choices are [&#x201C;eager&#x201D;, &#x201C;nvfuser]. 
This is an experimental API and subject to change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ray_scope" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ray_scope"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ray_scope</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;last&quot;</code>) &#x2014; The scope to use when doing hyperparameter search with Ray. By default, <code>&quot;last&quot;</code> will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the <a href="https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial" rel="nofollow">Ray documentation</a> for more options.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ddp_timeout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ddp_timeout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_timeout</strong> (<code>int</code>, <em>optional</em>, defaults to 1800) &#x2014; The timeout for <code>torch.distributed.init_process_group</code> calls, used to avoid GPU socket timeouts when performing slow operations in distributed runnings. 
Please refer the [PyTorch documentation] (<a href="https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group" rel="nofollow">https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group</a>) for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.use_mps_device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.use_mps_device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_mps_device</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use Apple Silicon chip based <code>mps</code> device.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>TrainingArguments is the subset of the arguments we use in our example scripts <strong>which relate to the training loop itself</strong>.</p> <p>Using <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.HfArgumentParser">HfArgumentParser</a> we can turn this class into <a href="https://docs.python.org/3/library/argparse#module-argparse" rel="nofollow">argparse</a> arguments that can be specified on the command line.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.get_process_log_level"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 
18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_process_log_level</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.get_process_log_level" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.get_process_log_level"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1590" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.</p> <p>For the main process the log level defaults to <code>logging.INFO</code> unless overridden by <code>log_level</code> argument.</p> <p>For the replica processes the log level defaults to <code>logging.WARNING</code> unless overridden by <code>log_level_replica</code> argument.</p> <p>The choice between the main and replica process settings is made according to the return value of <code>should_log</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.get_warmup_steps"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_warmup_steps</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.get_warmup_steps" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.get_warmup_steps"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1680" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Get number of steps used for a linear warmup.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.main_process_first"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 
12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>main_process_first</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.main_process_first" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.main_process_first"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1625" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">desc<span class="opacity-60"> = &#39;work&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.main_process_first.local" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.main_process_first.local"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; if <code>True</code> first means process of rank 0 of each node if 
<code>False</code> first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use <code>local=False</code> so that only the main process of the first node will do the processing. If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.main_process_first.desc" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.main_process_first.desc"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>desc</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;work&quot;</code>) &#x2014; a work description to be used in debug logs<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it’s finished releasing the replicas.</p> <p>One such use is for <code>datasets</code>’s <code>map</code> feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 
7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1689" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance while replace <code>Enum</code> by their values (for JSON serialization support). 
It obfuscates the token values by removing their value.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1706" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance to a JSON string.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_sanitized_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_sanitized_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_sanitized_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_sanitized_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args.py#L1712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Sanitized serialization to use with TensorBoard’s hparams</p></div></div> <h2 class="relative group"><a id="transformers.Seq2SeqTrainingArguments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqTrainingArguments </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainingArguments"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Seq2SeqTrainingArguments</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainingArguments" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainingArguments"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/training_args_seq2seq.py#L28" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite_output_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_predict<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">evaluation_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;no&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_train_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_eval_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_train_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_eval_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_accumulation_steps<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_accumulation_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_delay<span class="opacity-60">: typing.Optional[float] = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: float = 5e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_grad_norm<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: float = 3.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler_type<span class="opacity-60">: typing.Union[transformers.trainer_utils.SchedulerType, str] = &#39;linear&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_ratio<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level_replica<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_on_each_node<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_dir<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_first_step<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_nan_inf_filter<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.IntervalStrategy, str] = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">save_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_total_limit<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_on_each_node<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_cuda<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_mps_device<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 42</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_seed<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">jit_mode_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_ipex<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_opt_level<span class="opacity-60">: str = &#39;O1&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">half_precision_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf32<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">xpu_backend<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_num_cores<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_metrics_debug<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">debug<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_drop_last<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_num_workers<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_index<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">disable_tqdm<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_unused_columns<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_names<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">load_best_model_at_end<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_for_best_model<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">greater_is_better<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_data_skip<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sharded_ddp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp_min_num_params<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fsdp_transformer_layer_cls_to_wrap<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">deepspeed<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_smoothing_factor<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optim<span class="opacity-60">: typing.Union[transformers.training_args.OptimizerNames, str] = &#39;adamw_hf&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adafactor<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">group_by_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_column_name<span class="opacity-60">: typing.Optional[str] = &#39;length&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">report_to<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_find_unused_parameters<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_bucket_cap_mb<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_pin_memory<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_memory_metrics<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_legacy_prediction_loop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_strategy<span class="opacity-60">: typing.Union[transformers.trainer_utils.HubStrategy, str] = &#39;every_save&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_private_repo<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_checkpointing<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_inputs_for_metrics<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mp_parameters<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_find_batch_size<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">full_determinism<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">torchdynamo<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ray_scope<span class="opacity-60">: typing.Optional[str] = &#39;last&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_timeout<span class="opacity-60">: typing.Optional[int] = 1800</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sortish_sampler<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predict_with_generate<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generation_max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generation_num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.overwrite_output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.overwrite_output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 
0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. Use this to continue training if <code>output_dir</code> points to a checkpoint directory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_train" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_train"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_predict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_predict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.evaluation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.evaluation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Seq2SeqTrainingArguments.per_device_train_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.per_device_train_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.eval_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.eval_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.eval_delay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.eval_delay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_delay</strong> (<code>float</code>, <em>optional</em>) &#x2014; Number of epochs or steps to wait for before the first evaluation can be performed, depending on the evaluation_strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_beta2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 
0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.max_grad_norm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.max_grad_norm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.num_train_epochs(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.num_train_epochs(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.lr_scheduler_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.lr_scheduler_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. See the documentation of <a href="/docs/transformers/pr_19429/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.warmup_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.warmup_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. 
Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_level_replica" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_level_replica"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainingArguments.logging_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_first_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_first_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_total_limit" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_total_limit"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total number of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage, as the files will be saved with the same names for each node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.no_cuda" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.no_cuda"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to avoid using CUDA even when it is available.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>~Trainer.model_init</code> function to instantiate the model if it has some randomly initialized parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.data_seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.data_seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. 
This can be used to ensure reproducibility of data sampling, independent of the model seed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.jit_mode_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.jit_mode_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>jit_mode_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use PyTorch jit trace for inference.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.use_ipex" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.use_ipex"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_ipex</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Intel extension for PyTorch when it is available. 
<a href="https://github.com/intel/intel-extension-for-pytorch" rel="nofollow">IPEX installation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.bf16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.bf16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_opt_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_opt_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. 
Use <code>half_precision_backend</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.half_precision_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.half_precision_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;, &quot;cuda_amp&quot;, &quot;apex&quot;, &quot;cpu_amp&quot;</code>. <code>&quot;auto&quot;</code> will use CPU/CUDA AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.bf16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.bf16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.tf32" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.tf32"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable the TF32 mode, available in Ampere and newer GPU architectures. The default value depends on PyTorch&#x2019;s version default of <code>torch.backends.cuda.matmul.allow_tf32</code>. For more details please refer to the <a href="https://huggingface.co/docs/transformers/performance#tf32" rel="nofollow">TF32</a> documentation. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.xpu_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.xpu_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. 
Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.tpu_num_cores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.tpu_num_cores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_drop_last" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.dataloader_drop_last"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.eval_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.eval_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. Will default to the same value as <code>logging_steps</code> if not set.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.dataloader_num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 
0 means that the data will be loaded in the main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.past_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.past_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.run_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.run_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. 
Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.disable_tqdm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.disable_tqdm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>~notebook.NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.remove_unused_columns" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.remove_unused_columns"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code> yet.)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.label_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.label_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.load_best_model_at_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.load_best_model_at_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>evaluation_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.metric_for_best_model" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.metric_for_best_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.greater_is_better" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.greater_is_better"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ignore_data_skip" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ignore_data_skip"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. 
If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.sharded_ddp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.sharded_ddp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use the first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DDP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DDP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. 
If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fsdp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fsdp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fsdp</strong> (<code>bool</code>, <code>str</code> or list of <code>FSDPOption</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Use PyTorch Distributed Parallel Training (in distributed training only).</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;full_shard&quot;</code>: Shard parameters, gradients and optimizer states.</li> <li><code>&quot;shard_grad_op&quot;</code>: Shard optimizer states and gradients.</li> <li><code>&quot;offload&quot;</code>: Offload parameters and gradients to CPUs (only compatible with <code>&quot;full_shard&quot;</code> and <code>&quot;shard_grad_op&quot;</code>).</li> <li><code>&quot;auto_wrap&quot;</code>: Automatically recursively wrap layers with FSDP using <code>default_auto_wrap_policy</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fsdp_min_num_params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fsdp_min_num_params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fsdp_min_num_params</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; FSDP&#x2019;s minimum number of parameters for 
Default Auto Wrapping. (useful only when <code>fsdp</code> field is passed).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.deepspeed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.deepspeed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.label_smoothing_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.label_smoothing_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. 
Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.debug" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.debug"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.optim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.optim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code>, <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span 
class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adafactor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adafactor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. Use <code>--optim adafactor</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.group_by_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.group_by_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). 
Only useful if applying dynamic padding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.length_column_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.length_column_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.report_to" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.report_to"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;neptune&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. 
Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_pin_memory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainingArguments.dataloader_pin_memory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.skip_memory_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.skip_memory_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. 
This is skipped by default because it slows down the training and evaluation speed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. If this is activated, <code>output_dir</code> will begin a git directory synced with the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). Calling <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. 
Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>In other words, it defaults to the name of <code>output_dir</code> pushed under your namespace.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code>, <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the saves are very frequent, a new push is only attempted if the previous one is finished. 
A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. 
Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_private_repo" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_private_repo"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_private_repo</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, the Hub repo will be set to private.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.gradient_checkpointing" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.gradient_checkpointing"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.include_inputs_for_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.include_inputs_for_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_inputs_for_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the inputs will be passed to the <code>compute_metrics</code> function. This is intended for metrics that need inputs, predictions and references for scoring calculation in Metric class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.auto_find_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.auto_find_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_find_batch_size</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to find a batch size that will fit into memory automatically through exponential decay, avoiding CUDA Out-of-Memory errors. 
Requires accelerate to be installed (<code>pip install accelerate</code>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.full_determinism" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.full_determinism"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>full_determinism</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.enable_full_determinism">enable_full_determinism()</a> is called instead of <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.set_seed">set_seed()</a> to ensure reproducible results in distributed training<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.torchdynamo" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.torchdynamo"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>torchdynamo</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token that is used to set the backend compiler for TorchDynamo. Possible choices are [&#x201C;eager&#x201D;, &#x201C;nvfuser]. 
This is an experimental API and subject to change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ray_scope" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ray_scope"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ray_scope</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;last&quot;</code>) &#x2014; The scope to use when doing hyperparameter search with Ray. By default, <code>&quot;last&quot;</code> will be used. Ray will then use the last checkpoint of all trials, compare those, and select the best one. However, other options are also available. See the <a href="https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial" rel="nofollow">Ray documentation</a> for more options.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ddp_timeout" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ddp_timeout"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_timeout</strong> (<code>int</code>, <em>optional</em>, defaults to 1800) &#x2014; The timeout for <code>torch.distributed.init_process_group</code> calls, used to avoid GPU socket timeouts when performing slow operations in distributed runnings. 
Please refer to the <a href="https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group" rel="nofollow">PyTorch documentation</a> for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.use_mps_device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.use_mps_device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_mps_device</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use the Apple Silicon chip-based <code>mps</code> device.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.sortish_sampler" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.sortish_sampler"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sortish_sampler</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use a <em>sortish sampler</em> or not. 
Only possible if the underlying datasets are <em>Seq2SeqDataset</em> for now but will become generally available in the near future.</p> <p>It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.predict_with_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.predict_with_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use generate to calculate generative metrics (ROUGE, BLEU).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.generation_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.generation_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>generation_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The <code>max_length</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. 
Will default to the <code>max_length</code> value of the model configuration.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.generation_num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.generation_num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>generation_num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; The <code>num_beams</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. Will default to the <code>num_beams</code> value of the model configuration.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>TrainingArguments is the subset of the arguments we use in our example scripts <strong>which relate to the training loop itself</strong>.</p> <p>Using <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.HfArgumentParser">HfArgumentParser</a> we can turn this class into <a href="https://docs.python.org/3/library/argparse#module-argparse" rel="nofollow">argparse</a> arguments that can be specified on the command line.</p></div> <h2 class="relative group"><a id="checkpoints" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#checkpoints"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Checkpoints </span></h2> <p>By default, <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will save all checkpoints in the <code>output_dir</code> you set in the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> you are using. 
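For example, here is a minimal sketch of a <code>Seq2SeqTrainer</code> run that writes a checkpoint every 500 steps; the dataset variables, the output path and the hyperparameters are placeholders you would replace with your own:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained(&quot;Helsinki-NLP/opus-mt-en-de&quot;)
model = AutoModelForSeq2SeqLM.from_pretrained(&quot;Helsinki-NLP/opus-mt-en-de&quot;)

training_args = Seq2SeqTrainingArguments(
    output_dir=&quot;opus-mt-en-de-finetuned&quot;,  # checkpoint-xxx folders are created here
    save_strategy=&quot;steps&quot;,
    save_steps=500,
    predict_with_generate=True,  # use generate() during evaluation so metrics such as BLEU can be computed
)

trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    tokenizer=tokenizer,
    train_dataset=train_dataset,  # assumed: an already tokenized training dataset
    eval_dataset=eval_dataset,  # assumed: an already tokenized evaluation dataset
)

trainer.train()
# to continue an interrupted run later:
# trainer.train(resume_from_checkpoint=True)<!-- HTML_TAG_END --></pre></div> <p>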
Those will go in a subfolder named <code>checkpoint-xxx</code>, with xxx being the step at which the checkpoint was saved.</p> <p>Resuming training from a checkpoint can be done when calling <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">Trainer.train()</a> with either:</p> <ul><li><code>resume_from_checkpoint=True</code>, which will resume training from the latest checkpoint</li> <li><code>resume_from_checkpoint=checkpoint_dir</code>, which will resume training from the specific checkpoint in the directory passed.</li></ul> <p>In addition, you can easily save your checkpoints on the Model Hub when using <code>push_to_hub=True</code>. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. You can adapt the <code>hub_strategy</code> value of your <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to either:</p> <ul><li><code>&quot;checkpoint&quot;</code>: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;output_dir/last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per saved checkpoint in your final repository).</li></ul> <h2 class="relative group"><a id="logging" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#logging"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Logging </span></h2> <p>By default, <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will use <code>logging.INFO</code> for the main process and <code>logging.WARNING</code> for the replicas if any.</p> <p>These defaults can be overridden to use any of the 5 <code>logging</code> levels with <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s arguments:</p> <ul><li><code>log_level</code> - for the main process</li> <li><code>log_level_replica</code> - for the replicas</li></ul> <p>Further, if <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s <code>log_on_each_node</code> is set to <code>False</code>, only the main node will use the log level settings for its main process; all other nodes will use the log level settings for replicas.</p> <p>Note that <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> is going to set <code>transformers</code>’s log level separately for each 
node in its <code>Trainer.__init__()</code>. So you may want to set this sooner (see the next example) if you tap into other <code>transformers</code> functionality before creating the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> object.</p> <p>Here is an example of how this can be used in an application:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->[...] logger = logging.getLogger(__name__) <span class="hljs-comment"># Setup logging</span> logging.basicConfig( <span class="hljs-built_in">format</span>=<span class="hljs-string">&quot;%(asctime)s - %(levelname)s - %(name)s - %(message)s&quot;</span>, datefmt=<span class="hljs-string">&quot;%m/%d/%Y %H:%M:%S&quot;</span>, handlers=[logging.StreamHandler(sys.stdout)], ) <span class="hljs-comment"># set the main code and the modules it uses to the same log-level according to the node</span> log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)<!-- HTML_TAG_END --></pre></div> <p>And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... --log_level warning --log_level_replica error<!-- HTML_TAG_END --></pre></div> <p>In the multi-node environment if you also don’t want the logs to repeat for each node’s main process, you will want to change the above to:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0<!-- HTML_TAG_END --></pre></div> <p>and then only the main process of the first node will log at the “warning” level, and all other processes on the main node and all processes on other nodes will log at the “error” level.</p> <p>If you need your application to be as quiet as possible you could do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... 
--log_level error --log_level_replica error --log_on_each_node 0<!-- HTML_TAG_END --></pre></div> <p>(add <code>--log_on_each_node 0</code> if on multi-node environment)</p> <h2 class="relative group"><a id="randomness" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#randomness"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Randomness </span></h2> <p>When resuming from a checkpoint generated by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> all efforts are made to restore the <em>python</em>, <em>numpy</em> and <em>pytorch</em> RNG states to the same states as they were at the moment of saving that checkpoint, which should make the “stop and resume” style of training as close as possible to non-stop training.</p> <p>However, due to various default non-deterministic pytorch settings this might not fully work. If you want full determinism please refer to <a href="https://pytorch.org/docs/stable/notes/randomness" rel="nofollow">Controlling sources of randomness</a>. 
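For illustration only, the kind of flags that document talks about looks roughly like this minimal sketch (this is not something the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> sets for you, and the exact set of flags depends on your model and PyTorch version):</p> <pre><!-- HTML_TAG_START -->import torch

torch.use_deterministic_algorithms(True)   # error out on ops without a deterministic implementation
torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
torch.backends.cudnn.benchmark = False     # disable non-deterministic autotuning<!-- HTML_TAG_END --></pre> <p>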
As explained in that document, some of the settings that make things deterministic (e.g., <code>torch.backends.cudnn.deterministic</code>) may slow things down; therefore this can’t be done by default, but you can enable them yourself if needed.</p> <h2 class="relative group"><a id="specific-gpus-selection" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#specific-gpus-selection"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Specific GPUs Selection </span></h2> <p>Let’s discuss how you can tell your program which GPUs are to be used and in what order.</p> <p>When using <a href="https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html" rel="nofollow"><code>DistributedDataParallel</code></a> to use only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs, but you wish to use the first 2, you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 
ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate launch --num_processes 2 trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus 2 trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>You don’t need to use the Accelerate or <a href="Deepspeed">the Deepspeed integration</a> features to use these launchers.</p> <p>Until now you were able to tell the program how many GPUs to use. Now let’s discuss how to select specific GPUs and control their order.</p> <p>The following environment variables help you control which GPUs to use and their order.</p> <p><strong><code>CUDA_VISIBLE_DEVICES</code></strong></p> <p>If you have multiple GPUs and you’d like to use only 1 or a few of those GPUs, set the environment variable <code>CUDA_VISIBLE_DEVICES</code> to a list of the GPUs to be used.</p> <p>For example, let’s say you have 4 GPUs: 0, 1, 2 and 3. 
To run only on the physical GPUs 0 and 2, you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to <code>cuda:0</code> and <code>cuda:1</code> correspondingly.</p> <p>You can even change their order:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>Here your physical GPUs 0 and 2 are mapped to <code>cuda:1</code> and <code>cuda:0</code> correspondingly.</p> <p>The above examples were all for <code>DistributedDataParallel</code> use pattern, but the same method works for <a href="https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html" rel="nofollow"><code>DataParallel</code></a> as well:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>To emulate an environment without GPUs simply set this environment variable to an empty value like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES= python trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>As with any environment variable you can, of course, export those instead of adding these to the command line, as in:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 
border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it’s a common practice to set the environment variable just for a specific run on the same command line as it’s shown in most examples of this section.</p> <p><strong><code>CUDA_DEVICE_ORDER</code></strong></p> <p>There is an additional environment variable <code>CUDA_DEVICE_ORDER</code> that controls how the physical devices are ordered. The two choices are:</p> <ol><li>ordered by PCIe bus IDs (matches <code>nvidia-smi</code>’s order) - this is the default.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=PCI_BUS_ID<!-- HTML_TAG_END --></pre></div> <ol start="2"><li>ordered by GPU compute capabilities</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> 
CUDA_DEVICE_ORDER=FASTEST_FIRST<!-- HTML_TAG_END --></pre></div> <p>Most of the time you don’t need to care about this environment variable, but it’s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can’t swap the cards (e.g., if the cooling of the devices gets impacted) then setting <code>CUDA_DEVICE_ORDER=FASTEST_FIRST</code> will always put the newer faster card first. It’ll be somewhat confusing though since <code>nvidia-smi</code> will still report them in the PCIe order.</p> <p>The other solution to swapping the order is to use:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=1,0<!-- HTML_TAG_END --></pre></div> <p>In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has.</p> <p>Also if you do set this environment variable it’s the best to set it in your <code>~/.bashrc</code> file or some other startup config file and forget about it.</p> <h2 class="relative group"><a id="trainer-integrations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer-integrations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer Integrations </span></h2> <p>The <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> has been extended to support libraries that may dramatically improve your training 
time and fit much bigger models.</p> <p>Currently it supports third party solutions, <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a>, <a href="https://pytorch.org/docs/stable/fsdp.html" rel="nofollow">PyTorch FSDP</a> and <a href="https://github.com/facebookresearch/fairscale/" rel="nofollow">FairScale</a>, which implement parts of the paper <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He</a>.</p> <p>This provided support is new and experimental as of this writing. While the support for DeepSpeed and PyTorch FSDP is active and we welcome issues around it, we don’t support the FairScale integration anymore since it has been integrated in PyTorch main (see the <a href="#pytorch-fully-sharded-data-parallel">PyTorch FSDP integration</a>)</p> <a id="zero-install-notes"></a> <h3 class="relative group"><a id="cuda-extension-installation-notes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#cuda-extension-installation-notes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CUDA Extension Installation Notes </span></h3> <p>As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used.</p> <p>While all installation issues should be dealt with through the corresponding GitHub Issues of <a href="https://github.com/facebookresearch/fairscale/issues" rel="nofollow">FairScale</a> and <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">Deepspeed</a>, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.</p> <p>Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded 
font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale pip install deepspeed<!-- HTML_TAG_END --></pre></div> <p>please, read the following notes first.</p> <p>In these notes we give examples for what to do when <code>pytorch</code> has been built with CUDA <code>10.2</code>. If your situation is different remember to adjust the version number to the one you are after.</p> <h4 class="relative group"><a id="possible-problem-1" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-1"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #1 </span></h4> <p>While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.</p> <p>For example, if you installed <code>pytorch</code> with <code>cudatoolkit==10.2</code> in the Python environment, you also need to have CUDA <code>10.2</code> installed system-wide.</p> <p>The exact location may vary from system to system, but <code>/usr/local/cuda-10.2</code> is the most common location on many Unix systems. 
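To double-check which CUDA version your own <code>pytorch</code> build was compiled against (and therefore which version needs to be installed system-wide), a quick check like the following sketch can help (illustrative only):</p> <pre><!-- HTML_TAG_START -->import torch

print(torch.version.cuda)  # e.g. &quot;10.2&quot;, or None for a CPU-only build<!-- HTML_TAG_END --></pre> <p>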
When CUDA is correctly set up and added to the <code>PATH</code> environment variable, one can find the installation location by doing:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">which</span> nvcc<!-- HTML_TAG_END --></pre></div> <p>If you don’t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. For example, if you’re on Ubuntu you may want to search for: <a href="https://www.google.com/search?q=ubuntu+cuda+10.2+install" rel="nofollow">ubuntu cuda 10.2 install</a>.</p> <h4 class="relative group"><a id="possible-problem-2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #2 </span></h4> <p>Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. 
For example you may have:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->/usr/local/cuda-10.2 /usr/local/cuda-11.0<!-- HTML_TAG_END --></pre></div> <p>Now, in this situation you need to make sure that your <code>PATH</code> and <code>LD_LIBRARY_PATH</code> environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. If you encounter the problem, where the package build fails because it can’t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.</p> <p>First, you may look at their contents:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">echo</span> <span class="hljs-variable">$PATH</span> <span class="hljs-built_in">echo</span> <span class="hljs-variable">$LD_LIBRARY_PATH</span><!-- HTML_TAG_END --></pre></div> <p>so you get an idea of what is inside.</p> <p>It’s possible that <code>LD_LIBRARY_PATH</code> is empty.</p> <p><code>PATH</code> lists the locations of where executables can be found and <code>LD_LIBRARY_PATH</code> is for where shared libraries are to looked for. 
In both cases, earlier entries have priority over the later ones. <code>:</code> is used to separate multiple entries.</p> <p>Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> PATH=/usr/local/cuda-10.2/bin:<span class="hljs-variable">$PATH</span> <span class="hljs-built_in">export</span> LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:<span class="hljs-variable">$LD_LIBRARY_PATH</span><!-- HTML_TAG_END --></pre></div> <p>Note that we aren’t overwriting the existing values, but prepending instead.</p> <p>Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. <code>lib64</code> sub-directory is where the various CUDA <code>.so</code> objects, like <code>libcudart.so</code> reside, it’s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.</p> <h4 class="relative group"><a id="possible-problem-3" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-3"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #3 </span></h4> <p>Some older CUDA versions may refuse to build with newer compilers. 
For example, you may have <code>gcc-9</code> but it wants <code>gcc-7</code>.</p> <p>There are various ways to go about it.</p> <p>If you can install the latest CUDA toolkit, it typically should support the newer compiler.</p> <p>Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it’s not the default one, so the build system can’t see it. If you have <code>gcc-7</code> installed but the build system complains it can’t find it, the following might do the trick:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->sudo <span class="hljs-built_in">ln</span> -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo <span class="hljs-built_in">ln</span> -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++<!-- HTML_TAG_END --></pre></div> <p>Here, we are making a symlink to <code>gcc-7</code> from <code>/usr/local/cuda-10.2/bin/gcc</code> and since <code>/usr/local/cuda-10.2/bin/</code> should be in the <code>PATH</code> environment variable (see the previous problem’s solution), it should find <code>gcc-7</code> (and <code>g++-7</code>) and then the build will succeed.</p> <p>As always, make sure to edit the paths in the example to match your situation.</p> <h3 class="relative group"><a id="fairscale" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#fairscale"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FairScale </span></h3> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 
dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This integration is not supported anymore, we recommend you either use DeepSpeed or PyTorch FSDP.</p></div> <p>By integrating <a href="https://github.com/facebookresearch/fairscale/" rel="nofollow">FairScale</a> the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> provides support for the following features from <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">the ZeRO paper</a>:</p> <ol><li>Optimizer State Sharding</li> <li>Gradient Sharding</li> <li>Model Parameters Sharding (new and very experimental)</li> <li>CPU offload (new and very experimental)</li></ol> <p>You will need at least two GPUs to use this feature.</p> <p><strong>Installation</strong>:</p> <p>Install the library via pypi:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale<!-- HTML_TAG_END --></pre></div> <p>or via <code>transformers</code>’ <code>extras</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[fairscale]<!-- HTML_TAG_END --></pre></div> <p>(available starting from <code>transformers==4.6.0</code>) 
or find more details on <a href="https://github.com/facebookresearch/fairscale/#installation" rel="nofollow">FairScale’s GitHub page</a>.</p> <p>If you’re still struggling with the build, first make sure to read <a href="#zero-install-notes">CUDA Extension Installation Notes</a>.</p> <p>If that still didn’t resolve the build issue, here are a few more ideas.</p> <p><code>fairscale</code> seems to have an issue with the build isolation feature recently introduced by pip. If you have a problem with it, you may want to try one of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale --no-build-isolation .<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/facebookresearch/fairscale/ <span class="hljs-built_in">cd</span> fairscale <span class="hljs-built_in">rm</span> -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl<!-- HTML_TAG_END --></pre></div> <p><code>fairscale</code> also has issues with building against pytorch-nightly, so if you use it you may have to try one of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip uninstall -y fairscale; pip install fairscale --pre \ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \ --no-cache --no-build-isolation<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install -v --disable-pip-version-check . 
\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre<!-- HTML_TAG_END --></pre></div> <p>Of course, adjust the urls to match the cuda version you use.</p> <p>If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of <a href="https://github.com/facebookresearch/fairscale/issues" rel="nofollow">FairScale</a>.</p> <p><strong>Usage</strong>:</p> <p>To use the first version of Sharded data-parallelism, add <code>--sharded_ddp simple</code> to the command line arguments, and make sure you have added the distributed launcher <code>-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE</code> if you haven’t been using it already.</p> <p>For example here is how you could use it for <code>run_translation.py</code> with 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro \ --fp16 --sharded_ddp simple<!-- HTML_TAG_END --></pre></div> <p>Notes:</p> <ul><li>This feature requires distributed training (so multiple GPUs).</li> <li>It is not implemented for TPUs.</li> <li>It works with <code>--fp16</code> too, to make things even faster.</li> <li>One of the main benefits of enabling <code>--sharded_ddp simple</code> is that it uses a lot less GPU memory, so you should be able to use significantly larger batch sizes using the same hardware (e.g. 
3x and even bigger) which should lead to significantly shorter training time.</li></ul> <ol start="3"><li>To use the second version of Sharded data-parallelism, add <code>--sharded_ddp zero_dp_2</code> or <code>--sharded_ddp zero_dp_3</code> to the command line arguments, and make sure you have added the distributed launcher <code>-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE</code> if you haven’t been using it already.</li></ol> <p>For example here is how you could use it for <code>run_translation.py</code> with 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro \ --fp16 --sharded_ddp zero_dp_2<!-- HTML_TAG_END --></pre></div> <p><code>zero_dp_2</code> is an optimized version of the simple wrapper, while <code>zero_dp_3</code> fully shards model weights, gradients and optimizer states.</p> <p>Both are compatible with adding <code>cpu_offload</code> to enable ZeRO-offload (activate it like this: <code>--sharded_ddp &quot;zero_dp_2 cpu_offload&quot;</code>).</p> <p>Notes:</p> <ul><li>This feature requires distributed training (so multiple GPUs).</li> <li>It is not implemented for TPUs.</li> <li>It works with <code>--fp16</code> too, to make things even faster.</li> <li>The <code>cpu_offload</code> additional option requires <code>--fp16</code>.</li> <li>This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.</li></ul> <p>Known caveats:</p> <ul><li>This feature is incompatible with <code>--predict_with_generate</code> in the <em>run_translation.py</em> script.</li> <li>Using <code>--sharded_ddp zero_dp_3</code> requires wrapping each layer of the model in the special container <code>FullyShardedDataParallelism</code> of fairscale. 
It should be used with the option <code>auto_wrap</code> if you are not doing this yourself: <code>--sharded_ddp &quot;zero_dp_3 auto_wrap&quot;</code>.</li></ul> <h3 class="relative group"><a id="pytorch-fully-sharded-data-parallel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pytorch-fully-sharded-data-parallel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PyTorch Fully Sharded Data parallel </span></h3> <p>To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the <a href="https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/" rel="nofollow">Fully Sharded Data Parallel blog</a>. We have integrated the latest PyTorch’s Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config.</p> <p><strong>Required PyTorch version for FSDP support</strong>: PyTorch Nightly (or 1.12.0 if you read this after it has been released) as the model saving with FSDP activated is only available with recent fixes.</p> <p><strong>Usage</strong>:</p> <ul><li><p>Make sure you have added the distributed launcher <code>-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE</code> if you haven’t been using it already.</p></li> <li><p><strong>Sharding Strategy</strong>: </p> <ul><li>FULL_SHARD : Shards optimizer states + gradients + model parameters across data parallel workers/GPUs. For this, add <code>--fsdp full_shard</code> to the command line arguments. </li> <li>SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs. For this, add <code>--fsdp shard_grad_op</code> to the command line arguments.</li> <li>NO_SHARD : No sharding. 
For this, add <code>--fsdp no_shard</code> to the command line arguments.</li></ul></li> <li><p>To offload the parameters and gradients to the CPU, add <code>--fsdp &quot;full_shard offload&quot;</code> or <code>--fsdp &quot;shard_grad_op offload&quot;</code> to the command line arguments.</p></li> <li><p>To automatically recursively wrap layers with FSDP using <code>default_auto_wrap_policy</code>, add <code>--fsdp &quot;full_shard auto_wrap&quot;</code> or <code>--fsdp &quot;shard_grad_op auto_wrap&quot;</code> to the command line arguments.</p></li> <li><p>To enable both CPU offloading and auto wrapping, add <code>--fsdp &quot;full_shard offload auto_wrap&quot;</code> or <code>--fsdp &quot;shard_grad_op offload auto_wrap&quot;</code> to the command line arguments.</p></li> <li><p>If auto wrapping is enabled, you can use either a transformer-based auto wrap policy or a size-based auto wrap policy.</p> <ul><li>For the transformer-based auto wrap policy, please add <code>--fsdp_transformer_layer_cls_to_wrap &lt;value&gt;</code> to the command line arguments. This specifies the transformer layer class name (case-sensitive) to wrap, e.g., <code>BertLayer</code>, <code>GPTJBlock</code>, <code>T5Block</code> … This is important because submodules that share weights (e.g., the embedding layer) should not end up in different FSDP-wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. The remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit. Therefore, use this policy for transformer-based models.</li> <li>For the size-based auto wrap policy, please add <code>--fsdp_min_num_params &lt;number&gt;</code> to the command line arguments. It specifies FSDP’s minimum number of parameters for auto wrapping.</li></ul></li></ul> <p><strong>A few caveats to be aware of</strong></p> <ul><li>Mixed precision is currently not supported with FSDP as we wait for PyTorch to fix support for it. More details in this <a href="https://github.com/pytorch/pytorch/issues/75676" rel="nofollow">issue</a>.</li> <li>FSDP currently doesn’t support multiple parameter groups. 
More details mentioned in this <a href="https://github.com/pytorch/pytorch/issues/76501" rel="nofollow">issue</a> (<code>The original model parameters&#39; .grads are not set, meaning that they cannot be optimized separately (which is why we cannot support multiple parameter groups)</code>).</li></ul> <h3 class="relative group"><a id="using-trainer-for-accelerated-pytorch-training-on-mac" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-trainer-for-accelerated-pytorch-training-on-mac"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using Trainer for accelerated PyTorch Training on Mac </span></h3> <p>With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac. Apple’s Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new <code>&quot;mps&quot;</code> device. This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS. For more information please refer official documents <a href="https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/" rel="nofollow">Introducing Accelerated PyTorch Training on Mac</a> and <a href="https://pytorch.org/docs/stable/notes/mps.html" rel="nofollow">MPS BACKEND</a>. </p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>We strongly recommend to install PyTorch &gt;= 1.13 (nightly version at the time of writing) on your MacOS machine. It has major fixes related to model correctness and performance improvements for transformer based models. Please refer to <a href="https://github.com/pytorch/pytorch/issues/82707" rel="nofollow">https://github.com/pytorch/pytorch/issues/82707</a> for more details.</p></div> <p><strong>Benefits of Training and Inference using Apple Silicon Chips</strong></p> <ol><li>Enables users to train larger networks or batch sizes locally</li> <li>Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. 
This improves end-to-end performance.</li> <li>Reduces costs associated with cloud-based development or the need for additional local GPUs.</li></ol> <p><strong>Prerequisites</strong>: To install torch with <code>mps</code> support, please follow this nice Medium article <a href="https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1" rel="nofollow">GPU-Acceleration Comes to PyTorch on M1 Macs</a>.</p> <p><strong>Usage</strong>: You just have to pass the <code>--use_mps_device</code> argument. For example, you can run the official GLUE text classification task (from the root folder) on an Apple Silicon GPU with the command below:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> TASK_NAME=mrpc python examples/pytorch/text-classification/run_glue.py \ --model_name_or_path bert-base-cased \ --task_name <span class="hljs-variable">$TASK_NAME</span> \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 32 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/<span class="hljs-variable">$TASK_NAME</span>/ \ --use_mps_device \ --overwrite_output_dir<!-- HTML_TAG_END --></pre></div> <p><strong>A few caveats to be aware of</strong></p> <ol><li>Some PyTorch operations have not been implemented in mps and will throw an error. One way to get around that is to set the environment variable <code>PYTORCH_ENABLE_MPS_FALLBACK=1</code>, which will fall back to CPU for these operations. It still throws a UserWarning, however.</li> <li>The distributed setups <code>gloo</code> and <code>nccl</code> do not work with the <code>mps</code> device. This means that currently only a single GPU of the <code>mps</code> device type can be used.</li></ol>
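<p>If you launch training from a Python script instead of the command line, the same switch is exposed as a <code>TrainingArguments</code> field. The snippet below is a minimal, illustrative sketch rather than an official recipe: the output directory is a placeholder, and it assumes PyTorch 1.12 or later (for <code>torch.backends.mps</code>) and a version of 🤗 Transformers whose <code>TrainingArguments</code> accepts <code>use_mps_device</code>:</p> <pre><code class="language-python">import torch
from transformers import TrainingArguments

# Only request the MPS device if the backend is actually available
# (requires PyTorch >= 1.12 built with MPS support).
if torch.backends.mps.is_available():
    training_args = TrainingArguments(
        output_dir="/tmp/mrpc",           # placeholder output directory
        per_device_train_batch_size=32,
        num_train_epochs=3,
        use_mps_device=True,              # same effect as passing --use_mps_device
    )
else:
    # Fall back to the default device selection (CPU or CUDA).
    training_args = TrainingArguments(output_dir="/tmp/mrpc")
</code></pre> <p>The caveats above still apply to this sketch: operations not yet implemented for <code>mps</code> will error out unless <code>PYTORCH_ENABLE_MPS_FALLBACK=1</code> is set, and the <code>gloo</code>/<code>nccl</code> distributed setups cannot be combined with the <code>mps</code> device.</p>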
<p>Finally, please remember that the 🤗 <code>Trainer</code> only integrates the MPS backend, so if you have any problems or questions regarding MPS backend usage, please file an issue on <a href="https://github.com/pytorch/pytorch/issues" rel="nofollow">PyTorch GitHub</a>.</p> <p>Sections that were moved:</p> <p>[ <a href="./deepspeed#deepspeed-trainer-integration">DeepSpeed</a><a id="deepspeed"></a> | <a href="./deepspeed#deepspeed-installation">Installation</a><a id="installation"></a> | <a href="./deepspeed#deepspeed-multi-gpu">Deployment with multiple GPUs</a><a id="deployment-with-multiple-gpus"></a> | <a href="./deepspeed#deepspeed-one-gpu">Deployment with one GPU</a><a id="deployment-with-one-gpu"></a> | <a href="./deepspeed#deepspeed-notebook">Deployment in Notebooks</a><a id="deployment-in-notebooks"></a> | <a href="./deepspeed#deepspeed-config">Configuration</a><a id="configuration"></a> | <a href="./deepspeed#deepspeed-config-passing">Passing Configuration</a><a id="passing-configuration"></a> | <a href="./deepspeed#deepspeed-config-shared">Shared Configuration</a><a id="shared-configuration"></a> | <a href="./deepspeed#deepspeed-zero">ZeRO</a><a id="zero"></a> | <a href="./deepspeed#deepspeed-zero2-config">ZeRO-2 Config</a><a id="zero-2-config"></a> | <a href="./deepspeed#deepspeed-zero3-config">ZeRO-3 Config</a><a id="zero-3-config"></a> | <a href="./deepspeed#deepspeed-nvme">NVMe Support</a><a id="nvme-support"></a> | <a href="./deepspeed#deepspeed-zero2-zero3-performance">ZeRO-2 vs ZeRO-3 Performance</a><a id="zero-2-vs-zero-3-performance"></a> | <a href="./deepspeed#deepspeed-zero2-example">ZeRO-2 Example</a><a id="zero-2-example"></a> | <a href="./deepspeed#deepspeed-zero3-example">ZeRO-3 Example</a><a id="zero-3-example"></a> | <a href="./deepspeed#deepspeed-optimizer">Optimizer</a><a id="optimizer"></a> | <a href="./deepspeed#deepspeed-scheduler">Scheduler</a><a id="scheduler"></a> | <a href="./deepspeed#deepspeed-fp32">fp32 Precision</a><a id="fp32-precision"></a> | <a href="./deepspeed#deepspeed-amp">Automatic Mixed Precision</a><a id="automatic-mixed-precision"></a> | <a href="./deepspeed#deepspeed-bs">Batch Size</a><a id="batch-size"></a> | <a href="./deepspeed#deepspeed-grad-acc">Gradient Accumulation</a><a id="gradient-accumulation"></a> | <a href="./deepspeed#deepspeed-grad-clip">Gradient Clipping</a><a id="gradient-clipping"></a> | <a href="./deepspeed#deepspeed-weight-extraction">Getting The Model Weights Out</a><a id="getting-the-model-weights-out"></a> ]</p> <script type="module" data-hydrate="1t91efi"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1t91efi"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/trainer.mdx-hf-doc-builder.js") ], params: {} } }); </script>
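<p>As a recap of the sharded-training options described earlier on this page, the same flags can also be set programmatically. The snippet below is a minimal sketch, not a definitive recipe: the output directory is a placeholder, and it assumes a version of 🤗 Transformers whose <code>TrainingArguments</code> exposes the <code>fsdp</code>, <code>fsdp_transformer_layer_cls_to_wrap</code> and <code>sharded_ddp</code> fields. The script still has to be launched with <code>torch.distributed.launch</code> as shown above, since these features require distributed training.</p> <pre><code class="language-python">from transformers import TrainingArguments

# Programmatic equivalent of `--fsdp "full_shard auto_wrap"` together with
# `--fsdp_transformer_layer_cls_to_wrap BertLayer` from the FSDP section above.
training_args = TrainingArguments(
    output_dir="output_dir",                         # placeholder
    per_device_train_batch_size=1,
    fsdp="full_shard auto_wrap",                     # sharding strategy + auto wrapping
    fsdp_transformer_layer_cls_to_wrap="BertLayer",  # transformer-based wrap policy
)

# The FairScale integration is driven the same way, e.g.
# sharded_ddp="simple" or sharded_ddp="zero_dp_2 cpu_offload".
</code></pre>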
51
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/tokenizer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;tokenizer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedTokenizer&quot;,&quot;title&quot;:&quot;PreTrainedTokenizer&quot;},{&quot;local&quot;:&quot;transformers.PreTrainedTokenizerFast&quot;,&quot;title&quot;:&quot;PreTrainedTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.BatchEncoding&quot;,&quot;title&quot;:&quot;BatchEncoding&quot;}],&quot;title&quot;:&quot;Tokenizer&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/tokenizer.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Tokenizer </span></h1> <p>A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a “Fast” implementation based on the Rust library <a href="https://github.com/huggingface/tokenizers" rel="nofollow">🤗 Tokenizers</a>. The “Fast” implementations allows:</p> <ol><li>a significant speed-up in particular when doing batched tokenization and</li> <li>additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). 
</li></ol> <p>The base classes <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and “Fast” tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace’s AWS S3 repository). They both rely on <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> that contains the common methods, and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.SpecialTokensMixin">SpecialTokensMixin</a>.</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> thus implement the main methods for using all the tokenizers:</p> <ul><li>Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).</li> <li>Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece…).</li> <li>Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.</li></ul> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> holds the output of the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>’s encoding methods (<code>__call__</code>, <code>encode_plus</code> and <code>batch_encode_plus</code>) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (<code>input_ids</code>, <code>attention_mask</code>…). 
When the tokenizer is a “Fast” tokenizer (i.e., backed by HuggingFace <a href="https://github.com/huggingface/tokenizers" rel="nofollow">tokenizers library</a>), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).</p> <h2 class="relative group"><a id="transformers.PreTrainedTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizer </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L333" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). 
If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. 
Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). 
Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for all slow tokenizers.</p> <p>Inherits from <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>.</p> <p>Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.</p> <p>This class also contain the added tokens in a unified way on top of all tokenizers so we don’t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece…).</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, 
Any]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when loading the tokenizer with the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> <li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.text_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.text_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.text_pair_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.text_pair_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_offsets_mapping" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r 
from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.batch_decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 
rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.encode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 
0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.PreTrainedTokenizer.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.convert_ids_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_ids_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L883" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.Union[int, typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code> or <code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code> or <code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded token(s).</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.convert_tokens_to_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg 
width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.convert_tokens_to_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.convert_tokens_to_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L560" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.convert_tokens_to_ids.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token id or list of token ids.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.get_added_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_added_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.get_added_vocab" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.get_added_vocab"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L369" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PreTrainedTokenizer.get_added_vocab.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The added tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the added tokens in the vocabulary as a dictionary of token to index.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.num_special_tokens_to_add"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>num_special_tokens_to_add</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.num_special_tokens_to_add" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.num_special_tokens_to_add"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L458" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div 
id="transformers.PreTrainedTokenizer.num_special_tokens_to_add.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of special tokens added to sequences.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the number of added tokens when encoding a sequence with special tokens.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.prepare_for_tokenization"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_for_tokenization</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.prepare_for_tokenization" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 
0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L821" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[str, Dict[str, Any]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.prepare_for_tokenization.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The text to prepare.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. kwargs &#x2014; Keyword arguments to use for the tokenization.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.prepare_for_tokenization.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[str, Dict[str, Any]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The prepared text and the unused kwargs.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Performs any necessary transformations before tokenization.</p> <p>This method should pop the arguments from kwargs and return the remaining <code>kwargs</code> as well. We test the <code>kwargs</code> at the end of the encoding process to be sure all the arguments have been used.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.tokenize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokenize</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.tokenize" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.tokenize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils.py#L481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.tokenize.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.tokenize.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.tokenize.*kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.tokenize.*kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>**kwargs</strong> (additional keyword arguments) &#x2014; Passed along to the model-specific <code>prepare_for_tokenization</code> preprocessing method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizer.tokenize.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a string into a sequence of tokens, using the tokenizer.</p> <p>Splits into words for word-based vocabularies or into sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens.</p></div></div> <h2 class="relative group"><a id="transformers.PreTrainedTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizerFast </span></h2> <p>The <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> depends on the <a href="https://huggingface.co/docs/tokenizers" rel="nofollow">tokenizers</a> library. The tokenizers obtained from the 🤗 tokenizers library can be loaded very simply into 🤗 transformers.
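For instance, an existing <code>tokenizers.Tokenizer</code> object can be wrapped directly through the <code>tokenizer_object</code> argument documented below.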
Take a look at the <a href="../fast_tokenizers">Using tokenizers from 🤗 tokenizers</a> page to understand how this is done.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L78" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base 
!pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. 
Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). 
Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). 
Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. 
Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.tokenizer_object" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.tokenizer_object"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer_object</strong> (<a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a>) &#x2014; A <a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a> object from &#x1F917; tokenizers to instantiate from. See <a href="../fast_tokenizers">Using tokenizers from &#x1F917; tokenizers</a> for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.tokenizer_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.tokenizer_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer_file</strong> (<code>str</code>) &#x2014; A path to a local JSON file representing a previously serialized <a href="https://huggingface.co/docs/tokenizers/main/en/api/tokenizer#tokenizers.Tokenizer" rel="nofollow">tokenizers.Tokenizer</a> object from &#x1F917; tokenizers.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).</p> <p>Inherits from <a 
href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>.</p> <p>Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.</p> <p>This class also contains the added tokens in a unified way on top of all tokenizers so we don’t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece…).</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, Any]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when loading the tokenizer with the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> <li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. 
Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], 
typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = 
False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.text_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.text_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.text_pair_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.text_pair_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.padding" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_offsets_mapping" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span 
class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.batch_decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 
0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> 
<ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.encode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;Upload tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.PreTrainedTokenizerFast.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 
17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_ids_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L293" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.Union[int, typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code> or <code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code> or <code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded token(s).</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 
to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.convert_tokens_to_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L235" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token id or list of token ids.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.get_added_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_added_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.get_added_vocab" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.get_added_vocab"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L156" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PreTrainedTokenizerFast.get_added_vocab.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The added tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the added tokens in the vocabulary as a dictionary of token to index.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 
10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_special_tokens_to_add</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.num_special_tokens_to_add"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L272" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens 
should be computed in the case of a sequence pair or a single sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of special tokens added to sequences.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the number of added tokens when encoding a sequence with special tokens.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_truncation_and_padding</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L322" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding_strategy<span class="opacity-60">: PaddingStrategy</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation_strategy<span class="opacity-60">: TruncationStrategy</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_strategy</strong> (<a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>) &#x2014; The kind of padding that will be applied to the input<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_strategy</strong> (<a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>) &#x2014; The kind of truncation that will be applied to the input<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum size of a sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>) &#x2014; The stride to use when handling overflow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.</p> <p>The provided tokenizer has no padding / truncation strategy before the managed section. 
If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.train_new_from_iterator"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train_new_from_iterator</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_fast.py#L605" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_iterator<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span 
class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_special_tokens<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_map<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_iterator</strong> (generator of <code>List[str]</code>) &#x2014; The training corpus. 
Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary you want for your tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The total number of sequences in the iterator. 
This is used to provide meaningful progress tracking<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_special_tokens</strong> (list of <code>str</code> or <code>AddedToken</code>, <em>optional</em>) &#x2014; A list of new special tokens to add to the tokenizer you are training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens_map</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. 
kwargs &#x2014; Additional keyword arguments passed along to the trainer from the &#x1F917; Tokenizers library.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A new tokenizer of the same type as the original one, trained on <code>text_iterator</code>.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.</p></div></div> <h2 class="relative group"><a id="transformers.BatchEncoding" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BatchEncoding </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BatchEncoding</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoding<span class="opacity-60">: typing.Union[tokenizers.Encoding, typing.Sequence[tokenizers.Encoding], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[NoneType, str, transformers.utils.generic.TensorType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_sequences<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the <code>__call__</code>/<code>encode_plus</code>/<code>batch_encode_plus</code> methods (&#x2018;input_ids&#x2019;, &#x2018;attention_mask&#x2019;, etc.).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.encoding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.encoding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoding</strong> (<code>tokenizers.Encoding</code> or <code>Sequence[tokenizers.Encoding]</code>, <em>optional</em>) &#x2014; If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the <code>tokenizers.Encoding</code> instance or list of instance (for batches) hold this information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.prepend_batch_axis" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.prepend_batch_axis"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prepend_batch_axis</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add a batch axis when converting to tensors (see <code>tensor_type</code> above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.n_sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.n_sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_sequences</strong> (<code>Optional[int]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Holds the output of the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>, <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode_plus">encode_plus()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.batch_encode_plus">batch_encode_plus()</a> methods (tokens, attention_masks, etc).</p> <p>This class is derived from a python dictionary and can be used as a dictionary. 
In addition, this class exposes utility methods to map from word/character space to token space.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.char_to_token"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>char_to_token</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.char_to_token" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.char_to_token"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L531" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_char_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">char_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.batch_or_char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.batch_or_char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.char_to_token.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the token.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the index of the token in the encoded output comprising 
a character in the original string for a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.char_to_token(char_index)</code> if batch size is 1</li> <li><code>self.char_to_token(batch_index, char_index)</code> if batch size is greater or equal to 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.char_to_word"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>char_to_word</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.char_to_word" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.char_to_word"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L617" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_char_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">char_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.batch_or_char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.batch_or_char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.char_to_word.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index or indices of the associated encoded token(s).</p> <!-- HTML_TAG_END 
--></p> </div></div> <p>Get the word in the original string corresponding to a character in the original string of a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.char_to_word(char_index)</code> if batch size is 1</li> <li><code>self.char_to_word(batch_index, char_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.convert_to_tensors"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_to_tensors</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.convert_to_tensors" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.convert_to_tensors"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L656" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.convert_to_tensors.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.convert_to_tensors.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>. 
If <code>None</code>, no modification is done.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prepend_batch_axis</strong> (<code>int</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the batch dimension during the conversion.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Convert the inner content to tensors.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.sequence_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sequence_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.sequence_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.sequence_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L297" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.sequence_ids.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.sequence_ids.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.sequence_ids.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the sequence id corresponding to each token. 
Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding sequence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Return a list mapping the tokens to the id of their original sentences:</p> <ul><li><code>None</code> for special tokens added around or between sequences,</li> <li><code>0</code> for tokens corresponding to words in the first sequence,</li> <li><code>1</code> for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.</li></ul></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.to"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.to" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.to"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L741" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: typing.Union[str, ForwardRef(&#39;torch.device&#39;)]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.to.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.to.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.to.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same instance after modification.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Send all values to device by calling <code>v.to(device)</code> (PyTorch only).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_chars"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" 
fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_chars</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_chars" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_chars"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L492" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_chars.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_chars.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_chars.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_chars.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token or tokens in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.token_to_chars.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Span of characters in the original string, or None, if the token (e.g. 
<s>, </s>) doesn’t correspond to any chars in the origin string.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the character span corresponding to an encoded token in a sequence of the batch.</p> <p>Character spans are returned as a <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.CharSpan">CharSpan</a> with:</p> <ul><li><strong>start</strong> — Index of the first character in the original string associated to the token.</li> <li><strong>end</strong> — Index of the character following the last character in the original string associated to the token.</li></ul> <p>Can be called as:</p> <ul><li><code>self.token_to_chars(token_index)</code> if batch size is 1</li> <li><code>self.token_to_chars(batch_index, token_index)</code> if batch size is greater or equal to 1</li></ul></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_sequence"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_sequence</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_sequence" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_sequence"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L364" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_sequence.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_sequence.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprises one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_sequence.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_sequence.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.token_to_sequence.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the word in the input sequence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the index of the sequence represented by the given token. In the general use case, this method returns <code>0</code> for a single sequence or the first sequence of a pair, and <code>1</code> for the second sequence of a pair</p> <p>Can be called as:</p> <ul><li><code>self.token_to_sequence(token_index)</code> if batch size is 1</li> <li><code>self.token_to_sequence(batch_index, token_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_word"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_word</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_word" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_word"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L403" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span 
class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_word.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_word.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_word.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_word.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.token_to_word.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the word in the input sequence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.token_to_word(token_index)</code> if batch size is 1</li> <li><code>self.token_to_word(batch_index, token_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L279" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.tokens.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.tokens.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens at that index.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.word_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L345" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_ids.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_ids.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.word_ids.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> <!-- HTML_TAG_END --></p> </div></div> <p>Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_to_chars"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_to_chars</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.word_to_chars" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_to_chars"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 
0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L572" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_word_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>CharSpan</code> or <code>List[CharSpan]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.batch_or_word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.batch_or_word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.word_to_chars.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>CharSpan</code> or <code>List[CharSpan]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Span(s) of the associated character or characters in the string. 
CharSpan are NamedTuple with:</p> <ul> <li>start: index of the first character associated to the token in the original string</li> <li>end: index of the character following the last character associated to the token in the original string</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Get the character span in the original string corresponding to given word in a sequence of the batch.</p> <p>Character spans are returned as a CharSpan NamedTuple with:</p> <ul><li>start: index of the first character in the original string</li> <li>end: index of the character following the last character in the original string</li></ul> <p>Can be called as:</p> <ul><li><code>self.word_to_chars(word_index)</code> if batch size is 1</li> <li><code>self.word_to_chars(batch_index, word_index)</code> if batch size is greater or equal to 1</li></ul></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.word_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L441" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_word_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.batch_or_word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.batch_or_word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprises one sequence, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Get the encoded token span corresponding to a word in a sequence of the batch.</p> <p>Token spans are returned as a <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.TokenSpan">TokenSpan</a> with:</p> <ul><li><strong>start</strong> — Index of the first token.</li> <li><strong>end</strong> — Index of the token following the last token.</li></ul> <p>Can be called as:</p> <ul><li><code>self.word_to_tokens(word_index, sequence_index: int = 0)</code> if batch size is 
1</li> <li><code>self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)</code> if batch size is greater or equal to 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>words</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.words"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L321" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.words.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.words.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BatchEncoding.words.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> <!-- HTML_TAG_END --></p> </div></div> <p>Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.</p></div></div> <script type="module" data-hydrate="1m75ew5"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1m75ew5"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/tokenizer.mdx-hf-doc-builder.js") ], params: {} } }); </script>
52
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/keras_callbacks.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;keras-callbacks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.KerasMetricCallback&quot;,&quot;title&quot;:&quot;KerasMetricCallback&quot;},{&quot;local&quot;:&quot;transformers.PushToHubCallback&quot;,&quot;title&quot;:&quot;PushToHubCallback&quot;}],&quot;title&quot;:&quot;Keras callbacks&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/keras_callbacks.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="keras-callbacks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#keras-callbacks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Keras callbacks </span></h1> <p>When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:</p> <h2 class="relative group"><a id="transformers.KerasMetricCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>KerasMetricCallback </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.KerasMetricCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">KerasMetricCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.KerasMetricCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.KerasMetricCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/keras_callbacks.py#L22" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_fn<span class="opacity-60">: typing.Callable</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Union[tensorflow.python.data.ops.dataset_ops.DatasetV2, numpy.ndarray, tensorflow.python.framework.ops.Tensor, 
tuple, dict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_cols<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_cols<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predict_with_generate<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_xla_generation<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generate_kwargs<span class="opacity-60">: typing.Optional[dict] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.metric_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.metric_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_fn</strong> (<code>Callable</code>) &#x2014; Metric function provided by the user. It will be called with two arguments - <code>predictions</code> and <code>labels</code>. These contain the model&#x2019;s outputs and matching labels from the dataset. 
It should return a dict mapping metric names to numerical values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>tf.data.Dataset</code> or <code>dict</code> or <code>tuple</code> or <code>np.ndarray</code> or <code>tf.Tensor</code>) &#x2014; Validation data to be used to generate predictions for the <code>metric_fn</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.output_cols" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.output_cols"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_cols</strong> (`List[str], <em>optional</em>) &#x2014; A list of columns to be retained from the model output as the predictions. 
Defaults to all.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.label_cols" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.label_cols"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_cols</strong> (&#x2019;<code>List[str]</code>, <em>optional</em>&#x2019;) &#x2014; A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not supplied.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Batch size. 
Only used when the data is not a pre-batched <code>tf.data.Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.predict_with_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.predict_with_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we should use <code>model.generate()</code> to get outputs for the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.use_xla_generation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.use_xla_generation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_xla_generation</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If we&#x2019;re generating, whether to compile model generation with XLA. This can massively increase the speed of generation (up to 100X speedup) but will require a new XLA compilation for each input shape. When using XLA generation, it&#x2019;s a good idea to pad your inputs to the same size, or to use the <code>pad_to_multiple_of</code> argument in your <code>tokenizer</code> or <code>DataCollator</code>, which will reduce the number of unique input shapes and save a lot of compilation time. 
This option has no effect is <code>predict_with_generate</code> is <code>False</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.generate_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.generate_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>generate_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014; Keyword arguments to pass to <code>model.generate()</code> when generating. Has no effect if <code>predict_with_generate</code> is <code>False</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the <code>eval_dataset</code> before being passed to the <code>metric_fn</code> in <code>np.ndarray</code> format. The <code>metric_fn</code> should compute metrics and return a dict mapping metric names to metric values.</p> <p>We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. 
Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!</p> <div class="relative group rounded-md"><a id="transformers.KerasMetricCallback.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric rouge_metric = load_metric(<span class="hljs-string">&quot;rouge&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">rouge_fn</span>(<span class="hljs-params">predictions, labels</span>): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=<span class="hljs-literal">True</span>) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=<span class="hljs-literal">True</span>) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) <span class="hljs-keyword">return</span> {key: value.mid.fmeasure * <span class="hljs-number">100</span> <span class="hljs-keyword">for</span> key, value <span class="hljs-keyword">in</span> result.items()}<!-- HTML_TAG_END --></pre></div></div> <div class="relative group rounded-md"><a id="transformers.KerasMetricCallback.example-2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.KerasMetricCallback.example-2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>The above function will return a dict containing values which will be logged like any other Keras metric:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->{&#x27;rouge1&#x27;: <span class="hljs-number">37.4199</span>, &#x27;rouge2&#x27;: <span class="hljs-number">13.9768</span>, &#x27;rougeL&#x27;: <span class="hljs-number">34.361</span>, &#x27;rougeLsum&#x27;: <span class="hljs-number">35.0781</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.PushToHubCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PushToHubCallback </span></h2> <div class="docstring border-l-2 
border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PushToHubCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PushToHubCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PushToHubCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PushToHubCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/keras_callbacks.py#L267" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: typing.Union[str, pathlib.Path]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: typing.Union[str, transformers.trainer_utils.IntervalStrategy] = &#39;epoch&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">checkpoint<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_args<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written and synced with the repository on the Hub.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;epoch&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: Save is done at the end of training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of steps between saves when using the &#x201C;steps&#x201D; <code>save_strategy</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<code>PreTrainedTokenizerBase</code>, <em>optional</em>) &#x2014; The tokenizer used by the 
model. If supplied, will be uploaded to the repo alongside the weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <code>output_dir</code>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>.</p> <p>Will default to the name of <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. 
Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>checkpoint</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be resumed. Only usable when <code>save_strategy</code> is <code>&quot;epoch&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the <code>save_strategy</code> argument. 
Pushed models can be accessed like any other model on the hub, such as with the <code>from_pretrained</code> method.</p> <div class="relative group rounded-md"><a id="transformers.PushToHubCallback.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.keras_callbacks <span class="hljs-keyword">import</span> PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir=<span class="hljs-string">&quot;./model_save&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;gpt5-7xlarge&quot;</span>, ) model.fit(train_dataset, callbacks=[push_to_hub_callback])<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="1akds7s"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1akds7s"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/keras_callbacks.mdx-hf-doc-builder.js") ], params: {} } }); </script>
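<p>As a complementary, minimal sketch of the <code>PushToHubCallback</code> parameters documented above (hypothetical values: it assumes a compiled Keras <code>model</code>, a <code>tokenizer</code> and a <code>train_dataset</code> already exist, and "my-user/my-finetuned-model" is only a placeholder repository name), the callback can push every fixed number of training steps instead of once per epoch by combining <code>save_strategy="steps"</code> with <code>save_steps</code>:</p> <pre>from transformers.keras_callbacks import PushToHubCallback

# Push to the Hub every 500 training steps instead of once per epoch.
push_to_hub_callback = PushToHubCallback(
    output_dir="./model_save",              # local directory synced with the Hub repo
    tokenizer=tokenizer,                    # assumed to be defined earlier; uploaded alongside the weights
    hub_model_id="my-user/my-finetuned-model",  # placeholder repository name
    save_strategy="steps",
    save_steps=500,
)

model.fit(train_dataset, callbacks=[push_to_hub_callback])</pre>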
53
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/optimizer_schedules.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;optimization&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.AdamW&quot;,&quot;title&quot;:&quot;AdamW (PyTorch)&quot;},{&quot;local&quot;:&quot;transformers.Adafactor&quot;,&quot;title&quot;:&quot;AdaFactor (PyTorch)&quot;},{&quot;local&quot;:&quot;transformers.AdamWeightDecay&quot;,&quot;title&quot;:&quot;AdamWeightDecay (TensorFlow)&quot;},{&quot;local&quot;:&quot;schedules&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.SchedulerType&quot;,&quot;title&quot;:&quot;Learning Rate Schedules (Pytorch)&quot;},{&quot;local&quot;:&quot;transformers.WarmUp&quot;,&quot;title&quot;:&quot;Warmup (TensorFlow)&quot;}],&quot;title&quot;:&quot;Schedules&quot;},{&quot;local&quot;:&quot;gradient-strategies&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.GradientAccumulator&quot;,&quot;title&quot;:&quot;GradientAccumulator (TensorFlow)&quot;}],&quot;title&quot;:&quot;Gradient Strategies&quot;}],&quot;title&quot;:&quot;Optimization&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/optimizer_schedules.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="optimization" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimization"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimization </span></h1> <p>The <code>.optimization</code> module provides:</p> <ul><li>an optimizer with weight decay fixed that can be used to fine-tuned models, and</li> <li>several schedules in the form of schedule objects that inherit from <code>_LRSchedule</code>:</li> <li>a 
gradient accumulation class to accumulate the gradients of multiple batches</li></ul> <h2 class="relative group"><a id="transformers.AdamW" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdamW (PyTorch) </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamW"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AdamW</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AdamW" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamW"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Iterable[torch.nn.parameter.Parameter]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr<span class="opacity-60">: float = 0.001</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">betas<span class="opacity-60">: typing.Tuple[float, float] = (0.9, 0.999)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eps<span class="opacity-60">: float = 1e-06</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">correct_bias<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_deprecation_warning<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.lr" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.betas" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.betas"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>betas</strong> (<code>Tuple[float,float]</code>, <em>optional</em>, defaults to (0.9, 0.999)) &#x2014; Adam&#x2019;s betas parameters (b1, b2).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; Adam&#x2019;s epsilon for numerical stability.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Decoupled weight decay to apply.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.correct_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.correct_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>correct_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to correct bias in Adam (for instance, in Bert TF repository they use <code>False</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.no_deprecation_warning" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.no_deprecation_warning"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_deprecation_warning</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; A flag used to disable the deprecation warning (set to <code>True</code> to disable the warning).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Implements Adam algorithm with weight decay fix as introduced in <a href="https://arxiv.org/abs/1711.05101" rel="nofollow">Decoupled Weight Decay Regularization</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamW.step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>step</span></h4><!-- HTML_TAG_END --> <a id="transformers.AdamW.step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamW.step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline 
text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L324" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">closure<span class="opacity-60">: typing.Callable = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.step.closure" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.step.closure"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>closure</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A closure that reevaluates the model and returns the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Performs a single optimization step.</p></div></div> <h2 class="relative group"><a id="transformers.Adafactor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdaFactor (PyTorch) </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Adafactor"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Adafactor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Adafactor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Adafactor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L386" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eps<span class="opacity-60"> = (1e-30, 0.001)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clip_threshold<span class="opacity-60"> = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decay_rate<span class="opacity-60"> = -0.8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta1<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_parameter<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relative_step<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_init<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.lr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>lr</strong> (<code>float</code>, <em>optional</em>) &#x2014; The external learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eps</strong> (<code>Tuple[float, float]</code>, <em>optional</em>, defaults to (1e-30, 1e-3)) &#x2014; Regularization constants for square gradient and parameter scale respectively<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.clip_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.clip_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clip_threshold</strong> (<code>float</code>, <em>optional</em>, defaults 1.0) &#x2014; Threshold of root mean square of final gradient update<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to -0.8) &#x2014; Coefficient used to compute running averages of square<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta1</strong> (<code>float</code>, <em>optional</em>) &#x2014; Coefficient used for computing running averages of gradient<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Weight decay (L2 penalty)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.scale_parameter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.scale_parameter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_parameter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, learning rate is scaled by root mean square<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.relative_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.relative_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relative_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, time-dependent learning rate is computed instead of external learning rate<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.warmup_init" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.warmup_init"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Time-dependent learning rate computation depends on whether warm-up initialization is being used<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: <a href="https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py" rel="nofollow">https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py</a></p> <p>Paper: <em>Adafactor: Adaptive Learning Rates with Sublinear Memory Cost</em> <a href="https://arxiv.org/abs/1804.04235" rel="nofollow">https://arxiv.org/abs/1804.04235</a> Note that this optimizer internally adjusts the learning rate depending on the <code>scale_parameter</code>, <code>relative_step</code> and <code>warmup_init</code> options. To use a manual (external) learning rate schedule you should set <code>scale_parameter=False</code> and <code>relative_step=False</code>.</p> <p>This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested.</p> <p>Recommended T5 finetuning settings (<a href="https://discuss.huggingface.co/t/t5-finetuning-tips/684/3" rel="nofollow">https://discuss.huggingface.co/t/t5-finetuning-tips/684/3</a>):</p> <ul><li><p>Training without LR warmup or clip_threshold is not recommended.</p> <ul><li>use scheduled LR warm-up to fixed LR</li> <li>use clip_threshold=1.0 (<a href="https://arxiv.org/abs/1804.04235" rel="nofollow">https://arxiv.org/abs/1804.04235</a>)</li></ul></li> <li><p>Disable relative updates</p></li> <li><p>Use scale_parameter=False</p></li> <li><p>Additional optimizer operations like gradient clipping should not be used alongside Adafactor</p></li></ul> <div class="relative group rounded-md"><a id="transformers.Adafactor.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">False</span>, relative_step=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, lr=<span class="hljs-number">1e-3</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="relative group rounded-md"><a id="transformers.Adafactor.example-2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.example-2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Others reported the following combination to work well:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>)<!-- 
HTML_TAG_END --></pre></div></div> <p>When using <code>lr=None</code> with <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> you will most likely need to use the <code>AdafactorSchedule</code></p> <div class="relative group rounded-md"><a id="transformers.Adafactor.example-3" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.example-3"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>scheduler as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.optimization <span class="hljs-keyword">import</span> Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))<!-- HTML_TAG_END --></pre></div></div> <div class="relative group rounded-md"><a id="transformers.Adafactor.example-4" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.example-4"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># replace AdamW with Adafactor</span> optimizer = Adafactor( model.parameters(), lr=<span class="hljs-number">1e-3</span>, eps=(<span class="hljs-number">1e-30</span>, <span class="hljs-number">1e-3</span>), clip_threshold=<span class="hljs-number">1.0</span>, decay_rate=-<span class="hljs-number">0.8</span>, beta1=<span class="hljs-literal">None</span>, weight_decay=<span class="hljs-number">0.0</span>, relative_step=<span class="hljs-literal">False</span>, scale_parameter=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, )<!-- HTML_TAG_END --></pre></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Adafactor.step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 
11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Adafactor.step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Adafactor.step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L532" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">closure<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.step.closure" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.step.closure"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>closure</strong> (callable, optional) &#x2014; A closure that reevaluates the model and returns the loss.<!-- HTML_TAG_END --> </span></span> 
</li></ul> </div></div> <p>Performs a single optimization step</p></div></div> <h2 class="relative group"><a id="transformers.AdamWeightDecay" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdamWeightDecay (TensorFlow) </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamWeightDecay"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AdamWeightDecay</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AdamWeightDecay" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamWeightDecay"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L166" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: typing.Union[float, keras.optimizers.schedules.learning_rate_schedule.LearningRateSchedule] = 0.001</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta_1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta_2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">epsilon<span class="opacity-60">: float = 1e-07</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">amsgrad<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay_rate<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_in_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exclude_from_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str = &#39;AdamWeightDecay&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>Union[float, tf.keras.optimizers.schedules.LearningRateSchedule]</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use or a schedule.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.beta_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.beta_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta_1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.beta_2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.beta_2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta_2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.AdamWeightDecay.epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon parameter in Adam, which is a small constant for numerical stability.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.amsgrad" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.amsgrad"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>amsgrad</strong> (<code>bool</code>, <em>optional</em>, default to <code>False</code>) &#x2014; Whether to apply AMSGrad variant of this algorithm or not, see <a href="https://arxiv.org/abs/1904.09237" rel="nofollow">On the Convergence of Adam and Beyond</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.weight_decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.weight_decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.include_in_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.include_in_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters by default (unless they are in <code>exclude_from_weight_decay</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.exclude_from_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.exclude_from_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exclude_from_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to exclude from applying weight decay to. 
If <code>include_in_weight_decay</code> is passed, the names in it will supersede this list.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;AdamWeightDecay&#x2019;) &#x2014; Optional name for the operations created when applying gradients. kwargs &#x2014; Keyword arguments. Allowed to be {<code>clipnorm</code>, <code>clipvalue</code>, <code>lr</code>, <code>decay</code>}. <code>clipnorm</code> clips gradients by norm; <code>clipvalue</code> clips gradients by value; <code>decay</code> is included for backward compatibility to allow time-inverse decay of the learning rate. <code>lr</code> is included for backward compatibility; it is recommended to use <code>learning_rate</code> instead.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This Adam variant enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is <em>not</em> the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in <a href="https://arxiv.org/abs/1711.05101" rel="nofollow">Decoupled Weight Decay Regularization</a>.</p> <p>Instead we want to decay the weights in a manner that doesn’t interact with the m/v parameters. 
This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamWeightDecay.from_config"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_config</span></h4><!-- HTML_TAG_END --> <a id="transformers.AdamWeightDecay.from_config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamWeightDecay.from_config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L223" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Creates an optimizer from its config with WarmUp custom object.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 
items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.create_optimizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.create_optimizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.create_optimizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.create_optimizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_lr<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_lr_ratio<span 
class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_clipnorm<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_global_clipnorm<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay_rate<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_in_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.init_lr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.init_lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_lr</strong> (<code>float</code>) &#x2014; The desired learning rate at the end of the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.num_train_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.num_train_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_train_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of warmup steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.min_lr_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.min_lr_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>min_lr_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The final learning rate at the end of the linear decay will be <code>init_lr * min_lr_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 to use in Adam.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_beta2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 to use in Adam.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon to use in Adam. adam_clipnorm &#x2014; (<code>float</code>, <em>optional</em>, defaults to <code>None</code>): If not <code>None</code>, clip the gradient norm for each weight tensor to this value. adam_global_clipnorm &#x2014; (<code>float</code>, <em>optional</em>, defaults to <code>None</code>) If not <code>None</code>, clip gradient norm to this value. When using this argument, the norm is computed over all weight tensors, as if they were concatenated into a single vector.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.weight_decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.weight_decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.power" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The power to use for PolynomialDecay.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.include_in_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.include_in_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters except bias and layer norm parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay.</p></div> <h2 class="relative group"><a id="schedules" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#schedules"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Schedules </span></h2> <h3 class="relative group"><a id="transformers.SchedulerType" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SchedulerType"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 
0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Learning Rate Schedules (Pytorch) </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SchedulerType"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SchedulerType</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SchedulerType" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SchedulerType"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L355" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>An enumeration.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_scheduler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_scheduler</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_scheduler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_scheduler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: typing.Union[str, transformers.trainer_utils.SchedulerType]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code> or <code>SchedulerType</code>) &#x2014; The name of the scheduler to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer that will be used during training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of training steps to do. 
This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Unified API to get any scheduler from its name.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_constant_schedule"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_constant_schedule</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_constant_schedule" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_constant_schedule"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a constant learning rate, using the learning rate set in optimizer.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_constant_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_constant_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_constant_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_constant_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span 
class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png"> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_cosine_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_cosine_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_cosine_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_cosine_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L104" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_cycles<span class="opacity-60">: float = 0.5</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_cycles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_cycles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_cycles</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; The number of waves in the cosine schedule (the default is to just decrease from the max value to 0 following a half-cosine).<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases following the values of the cosine function from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png"> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_cosine_with_hard_restarts_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L138" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_cycles<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_cycles</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of hard restarts to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases following the values of the cosine function from the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png"> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_linear_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block 
-mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_linear_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_linear_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_linear_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L75" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60"> = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 
items-start"><a id="transformers.get_linear_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png"> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_polynomial_decay_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_polynomial_decay_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_polynomial_decay_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_polynomial_decay_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization.py#L173" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_end<span class="opacity-60"> = 1e-07</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60"> = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60"> = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.optimizer"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>~torch.optim.Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.lr_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.lr_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_end</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The end LR.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.power" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Power factor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by <em>lr_end</em>, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.</p> <p>Note: <em>power</em> defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at <a href="https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37" rel="nofollow">https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37</a></p></div> <h3 class="relative group"><a id="transformers.WarmUp" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Warmup (TensorFlow) </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.WarmUp"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path 
class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">WarmUp</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.WarmUp" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.WarmUp"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initial_learning_rate<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decay_schedule_fn<span class="opacity-60">: typing.Callable</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.initial_learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.initial_learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 
0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initial_learning_rate</strong> (<code>float</code>) &#x2014; The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end of the warmup).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.decay_schedule_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.decay_schedule_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decay_schedule_fn</strong> (<code>Callable</code>) &#x2014; The schedule function to apply after the warmup for the rest of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup part of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.power" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The power to use for the polynomial warmup (defaults is a linear warmup).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Optional name prefix for the returned tensors during the schedule.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Applies a warmup schedule on a given learning rate decay schedule.</p></div> <h2 class="relative group"><a id="gradient-strategies" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gradient-strategies"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Gradient Strategies </span></h2> <h3 class="relative group"><a id="transformers.GradientAccumulator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.GradientAccumulator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GradientAccumulator (TensorFlow) </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.GradientAccumulator"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">GradientAccumulator</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.GradientAccumulator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.GradientAccumulator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L296" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should then call <code>.gradients</code>, scale the gradients if required, and pass the result to <code>apply_gradients</code>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.GradientAccumulator.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END --> <a id="transformers.GradientAccumulator.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.GradientAccumulator.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/optimization_tf.py#L358" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the accumulated gradients on the current replica.</p></div></div> <script type="module" data-hydrate="e9wq5u"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="e9wq5u"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/optimizer_schedules.mdx-hf-doc-builder.js") ], params: {} } }); </script>
54
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/deepspeed.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;deepspeed-integration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;trainer-deepspeed-integration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;installation&quot;,&quot;title&quot;:&quot;Installation&quot;},{&quot;local&quot;:&quot;deployment-with-multiple-gpus&quot;,&quot;title&quot;:&quot;Deployment with multiple GPUs&quot;},{&quot;local&quot;:&quot;deployment-with-one-gpu&quot;,&quot;title&quot;:&quot;Deployment with one GPU&quot;},{&quot;local&quot;:&quot;deployment-in-notebooks&quot;,&quot;title&quot;:&quot;Deployment in Notebooks&quot;},{&quot;local&quot;:&quot;configuration&quot;,&quot;title&quot;:&quot;Configuration&quot;},{&quot;local&quot;:&quot;passing-configuration&quot;,&quot;title&quot;:&quot;Passing Configuration&quot;},{&quot;local&quot;:&quot;shared-configuration&quot;,&quot;title&quot;:&quot;Shared Configuration&quot;},{&quot;local&quot;:&quot;zero&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;zero2-config&quot;,&quot;title&quot;:&quot;ZeRO-2 Config&quot;},{&quot;local&quot;:&quot;zero3-config&quot;,&quot;title&quot;:&quot;ZeRO-3 Config&quot;}],&quot;title&quot;:&quot;ZeRO&quot;},{&quot;local&quot;:&quot;nvme-support&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;zero2-vs-zero3-performance&quot;,&quot;title&quot;:&quot;ZeRO-2 vs ZeRO-3 Performance&quot;},{&quot;local&quot;:&quot;zero2-example&quot;,&quot;title&quot;:&quot;ZeRO-2 Example&quot;},{&quot;local&quot;:&quot;zero3-example&quot;,&quot;title&quot;:&quot;ZeRO-3 Example&quot;}],&quot;title&quot;:&quot;NVMe Support&quot;},{&quot;local&quot;:&quot;optimizer-and-scheduler&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;optimizer&quot;,&quot;title&quot;:&quot;Optimizer&quot;},{&quot;local&quot;:&quot;scheduler&quot;,&quot;title&quot;:&quot;Scheduler&quot;}],&quot;title&quot;:&quot;Optimizer and Scheduler&quot;},{&quot;local&quot;:&quot;fp32-precision&quot;,&quot;title&quot;:&quot;fp32 Precision&quot;},{&quot;local&quot;:&quot;automatic-mixed-precision&quot;,&quot;title&quot;:&quot;Automatic Mixed Precision&quot;},{&quot;local&quot;:&quot;fp16&quot;,&quot;title&quot;:&quot;fp16&quot;},{&quot;local&quot;:&quot;bf16&quot;,&quot;title&quot;:&quot;bf16&quot;},{&quot;local&quot;:&quot;apex&quot;,&quot;title&quot;:&quot;apex&quot;},{&quot;local&quot;:&quot;batch-size&quot;,&quot;title&quot;:&quot;Batch Size&quot;},{&quot;local&quot;:&quot;gradient-accumulation&quot;,&quot;title&quot;:&quot;Gradient Accumulation&quot;},{&quot;local&quot;:&quot;gradient-clipping&quot;,&quot;title&quot;:&quot;Gradient Clipping&quot;},{&quot;local&quot;:&quot;getting-the-model-weights-out&quot;,&quot;title&quot;:&quot;Getting The Model Weights Out&quot;},{&quot;local&quot;:&quot;zero3-and-infinity-nuances&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;constructing-massive-models&quot;,&quot;title&quot;:&quot;Constructing Massive Models&quot;},{&quot;local&quot;:&quot;gathering-parameters&quot;,&quot;title&quot;:&quot;Gathering Parameters&quot;}],&quot;title&quot;:&quot;ZeRO-3 and Infinity Nuances&quot;},{&quot;local&quot;:&quot;zero-inference&quot;,&quot;title&quot;:&quot;ZeRO Inference&quot;},{&quot;local&quot;:&quot;memory-requirements&quot;,&quot;title&quot;:&quot;Memory Requirements&quot;},{&quot;local&quot;:&quot;filing-issues&quot;,&quot;title&quot;:&quot;Filing 
Issues&quot;},{&quot;local&quot;:&quot;troubleshooting&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;the-deepspeed-process-gets-killed-at-startup-without-a-traceback&quot;,&quot;title&quot;:&quot;the `deepspeed` process gets killed at startup without a traceback&quot;},{&quot;local&quot;:&quot;training-andor-evalpredict-loss-is-nan&quot;,&quot;title&quot;:&quot;training and/or eval/predict loss is `NaN`&quot;}],&quot;title&quot;:&quot;Troubleshooting&quot;},{&quot;local&quot;:&quot;notes&quot;,&quot;title&quot;:&quot;Notes&quot;}],&quot;title&quot;:&quot;Trainer Deepspeed Integration&quot;},{&quot;local&quot;:&quot;nontrainer-deepspeed-integration&quot;,&quot;title&quot;:&quot;Non-Trainer Deepspeed Integration&quot;},{&quot;local&quot;:&quot;transformers.deepspeed.HfDeepSpeedConfig&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;custom-deepspeed-zero-inference&quot;,&quot;title&quot;:&quot;Custom DeepSpeed ZeRO Inference&quot;}],&quot;title&quot;:&quot;HfDeepSpeedConfig&quot;},{&quot;local&quot;:&quot;main-deepspeed-resources&quot;,&quot;title&quot;:&quot;Main DeepSpeed Resources&quot;}],&quot;title&quot;:&quot;DeepSpeed Integration&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/deepspeed.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="deepspeed-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DeepSpeed Integration </span></h1> <p><a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a> implements everything described in the <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO paper</a>. 
Currently it provides full support for:</p> <ol><li>Optimizer state partitioning (ZeRO stage 1)</li> <li>Gradient partitioning (ZeRO stage 2)</li> <li>Parameter partitioning (ZeRO stage 3)</li> <li>Custom mixed precision training handling</li> <li>A range of fast CUDA-extension-based optimizers</li> <li>ZeRO-Offload to CPU and NVMe</li></ol> <p>ZeRO-Offload has its own dedicated paper: <a href="https://arxiv.org/abs/2101.06840" rel="nofollow">ZeRO-Offload: Democratizing Billion-Scale Model Training</a>. And NVMe-support is described in the paper <a href="https://arxiv.org/abs/2104.07857" rel="nofollow">ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning</a>.</p> <p>DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.</p> <p>DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which won’t be possible on a single GPU.</p> <p>🤗 Transformers integrates <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a> via 2 options:</p> <ol><li>Integration of the core DeepSpeed features via <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>. This is an everything-done-for-you type of integration - just supply your custom config file or use our template and you have nothing else to do. Most of this document is focused on this feature.</li> <li>If you don’t use <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> and want to use your own Trainer where you integrated DeepSpeed yourself, core functionality functions like <code>from_pretrained</code> and <code>from_config</code> include integration of essential parts of DeepSpeed like <code>zero.Init</code> for ZeRO stage 3 and higher. To tap into this feature read the docs on <a href="#nontrainer-deepspeed-integration">non-Trainer DeepSpeed Integration</a>.</li></ol> <p>What is integrated:</p> <p>Training:</p> <ol><li>DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).</li></ol> <p>Inference:</p> <ol><li>DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn’t use an optimizer and a lr scheduler and only stage 3 is relevant. 
For more details see: <a href="#zero-inference">zero-inference</a>.</li></ol> <p>There is also DeepSpeed Inference - this is a totally different technology which uses Tensor Parallelism instead of ZeRO (coming soon).</p> <a id="deepspeed-trainer-integration"></a> <h2 class="relative group"><a id="trainer-deepspeed-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer-deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer Deepspeed Integration </span></h2> <a id="deepspeed-installation"></a> <h3 class="relative group"><a id="installation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation </span></h3> <p>Install the library via pypi:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install deepspeed<!-- HTML_TAG_END --></pre></div> <p>or via <code>transformers</code>’ <code>extras</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[deepspeed]<!-- HTML_TAG_END --></pre></div> <p>or find more details on <a href="https://github.com/microsoft/deepspeed#installation" rel="nofollow">the DeepSpeed’s GitHub page</a> and <a href="https://www.deepspeed.ai/tutorials/advanced-install/" rel="nofollow">advanced install</a>.</p> <p>If you’re still struggling with the build, first make sure to read <a href="trainer#cuda-extension-installation-notes">CUDA Extension Installation Notes</a>.</p> <p>If you don’t prebuild the extensions and rely on them to be built at run time and you tried all of the above solutions to no avail, the next thing to try is to pre-build the modules before installing them.</p> <p>To make a local build for DeepSpeed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> 
-rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . \ --global-option=<span class="hljs-string">&quot;build_ext&quot;</span> --global-option=<span class="hljs-string">&quot;-j8&quot;</span> --no-cache -v \ --disable-pip-version-check 2&gt;&amp;1 | <span class="hljs-built_in">tee</span> build.log<!-- HTML_TAG_END --></pre></div> <p>If you intend to use NVMe offload you will also need to include <code>DS_BUILD_AIO=1</code> in the instructions above (and also install <em>libaio-dev</em> system-wide).</p> <p>Edit <code>TORCH_CUDA_ARCH_LIST</code> to insert the code for the architectures of the GPU cards you intend to use. Assuming all your cards are the same you can get the arch via:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_device_capability())&quot;</span><!-- HTML_TAG_END --></pre></div> <p>So if you get <code>8, 6</code>, then use <code>TORCH_CUDA_ARCH_LIST=&quot;8.6&quot;</code>. 
If you have multiple different cards, you can list all of them like so <code>TORCH_CUDA_ARCH_LIST=&quot;6.1;8.6&quot;</code></p> <p>If you need to use the same setup on multiple machines, make a binary wheel:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> -rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \ python setup.py build_ext -j8 bdist_wheel<!-- HTML_TAG_END --></pre></div> <p>it will generate something like <code>dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl</code> which now you can install as <code>pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl</code> locally or on any other machine.</p> <p>Again, remember to ensure to adjust <code>TORCH_CUDA_ARCH_LIST</code> to the target architectures.</p> <p>You can find the complete list of NVIDIA GPUs and their corresponding <strong>Compute Capabilities</strong> (same as arch in this context) <a href="https://developer.nvidia.com/cuda-gpus" rel="nofollow">here</a>.</p> <p>You can check the archs pytorch was built with using:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_arch_list())&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Here is how to find out the arch for one of the installed GPUs. For example, for GPU 0:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; \ print(torch.cuda.get_device_properties(torch.device(&#x27;cuda&#x27;)))&quot;</span><!-- HTML_TAG_END --></pre></div> <p>If the output is:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->_CudaDeviceProperties(name=<span class="hljs-string">&#x27;GeForce RTX 3090&#x27;</span>, major=8, minor=6, total_memory=24268MB, multi_processor_count=82)<!-- HTML_TAG_END --></pre></div> <p>then you know that this card’s arch is <code>8.6</code>.</p> <p>You can also leave <code>TORCH_CUDA_ARCH_LIST</code> out completely and then the build program will automatically query the architecture of the GPUs the build is made on. 
Either way, the detected archs may or may not match the GPUs on the target machines, which is why it’s best to specify the desired archs explicitly.</p> <p>If after trying everything suggested you still encounter build issues, please open an issue in the <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">DeepSpeed</a> issue tracker.</p> <a id="deepspeed-multi-gpu"></a> <h3 class="relative group"><a id="deployment-with-multiple-gpus" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-with-multiple-gpus"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment with multiple GPUs </span></h3> <p>To deploy this feature with multiple GPUs adjust the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments as follows:</p> <ol><li>replace <code>python -m torch.distributed.launch</code> with <code>deepspeed</code>.</li> <li>add a new argument <code>--deepspeed ds_config.json</code>, where <code>ds_config.json</code> is the DeepSpeed configuration file as documented <a href="https://www.deepspeed.ai/docs/config-json/" rel="nofollow">here</a>.
The file naming is up to you.</li></ol> <p>Therefore, if your original command line looked as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 your_program.py &lt;normal cl args&gt;<!-- HTML_TAG_END --></pre></div> <p>Now it should be:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --deepspeed ds_config.json<!-- HTML_TAG_END --></pre></div> <p>Unlike, <code>torch.distributed.launch</code> where you have to specify how many GPUs to use with <code>--nproc_per_node</code>, with the <code>deepspeed</code> launcher you don’t have to use the corresponding <code>--num_gpus</code> if you want all of your GPUs used. The full details on how to configure various nodes and GPUs can be found <a href="https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node" rel="nofollow">here</a>.</p> <p>In fact, you can continue using <code>-m torch.distributed.launch</code> with DeepSpeed as long as you don’t need to use <code>deepspeed</code> launcher-specific arguments. Typically if you don’t need a multi-node setup you’re not required to use the <code>deepspeed</code> launcher. 
But since in the DeepSpeed documentation it’ll be used everywhere, for consistency we will use it here as well.</p> <p>Here is an example of running <code>run_translation.py</code> under DeepSpeed deploying all available GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro<!-- HTML_TAG_END --></pre></div> <p>Note that in the DeepSpeed documentation you are likely to see <code>--deepspeed --deepspeed_config ds_config.json</code> - i.e. 
two DeepSpeed-related arguments, but for the sake of simplicity, and since there are already so many arguments to deal with, we combined the two into a single argument.</p> <p>For some practical usage examples, please, see this <a href="https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400" rel="nofollow">post</a>.</p> <a id="deepspeed-one-gpu"></a> <h3 class="relative group"><a id="deployment-with-one-gpu" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-with-one-gpu"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment with one GPU </span></h3> <p>To deploy DeepSpeed with one GPU adjust the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro<!-- HTML_TAG_END --></pre></div> <p>This is almost the same as with multiple-GPUs, but here we tell DeepSpeed explicitly to use just one GPU via <code>--num_gpus=1</code>. By default, DeepSpeed deploys all GPUs it can see on the given node. 
If you have only 1 GPU to start with, then you don’t need this argument. The following <a href="https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node" rel="nofollow">documentation</a> discusses the launcher options.</p> <p>Why would you want to use DeepSpeed with just one GPU?</p> <ol><li>It has a ZeRO-offload feature which can delegate some computations and memory to the host’s CPU and RAM, and thus leave more GPU resources for model’s needs - e.g. larger batch size, or enabling a fitting of a very big model which normally won’t fit.</li> <li>It provides a smart GPU memory management system, that minimizes memory fragmentation, which again allows you to fit bigger models and data batches.</li></ol> <p>While we are going to discuss the configuration in details next, the key to getting a huge improvement on a single GPU with DeepSpeed is to have at least the following configuration in the configuration file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> 
<span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>which enables optimizer offload and some other important features. You may experiment with the buffer sizes, you will find more details in the discussion below.</p> <p>For a practical usage example of this type of deployment, please, see this <a href="https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685" rel="nofollow">post</a>.</p> <p>You may also try the ZeRO-3 with CPU and NVMe offload as explained further in this document.</p> <p>Notes:</p> <ul><li><p>if you need to run on a specific GPU, which is different from GPU 0, you can’t use <code>CUDA_VISIBLE_DEVICES</code> to limit the visible scope of available GPUs. Instead, you have to use the following syntax:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>In this example, we tell DeepSpeed to use GPU 1 (second gpu).</p></li></ul> <a id="deepspeed-notebook"></a> <h3 class="relative group"><a id="deployment-in-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-in-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment in Notebooks </span></h3> <p>The problem with running notebook cells as a script is that there is no normal <code>deepspeed</code> launcher to rely on, so under certain setups we have to emulate it.</p> <p>If you’re using only 1 GPU, here is how you’d have to adjust your training code in the notebook to use DeepSpeed.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># DeepSpeed requires a distributed environment even when only one process is used.</span> <span class="hljs-comment"># This emulates a launcher in the notebook</span> <span class="hljs-keyword">import</span> os os.environ[<span class="hljs-string">&quot;MASTER_ADDR&quot;</span>] = <span class="hljs-string">&quot;localhost&quot;</span> os.environ[<span class="hljs-string">&quot;MASTER_PORT&quot;</span>] = <span class="hljs-string">&quot;9994&quot;</span> <span class="hljs-comment"># modify if RuntimeError: Address already in use</span> os.environ[<span class="hljs-string">&quot;RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>] = <span class="hljs-string">&quot;1&quot;</span> <span class="hljs-comment"># Now proceed as normal, plus pass the deepspeed config file</span> training_args = TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;ds_config_zero3.json&quot;</span>) trainer = Trainer(...) trainer.train()<!-- HTML_TAG_END --></pre></div> <p>Note: <code>...</code> stands for the normal arguments that you’d pass to the functions.</p> <p>If you want to use more than 1 GPU, you must use a multi-process environment for DeepSpeed to work. 
That is, you have to use the launcher for that purpose and this cannot be accomplished by emulating the distributed environment presented at the beginning of this section.</p> <p>If you want to create the config file on the fly in the notebook in the current directory, you could have a dedicated cell with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->%%bash cat &lt;&lt;<span class="hljs-string">&#x27;EOT&#x27;</span> &gt; ds_config_zero3.json { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;loss_scale&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;loss_scale_window&quot;</span>: <span class="hljs-number">1000</span>, <span class="hljs-string">&quot;initial_scale_power&quot;</span>: <span class="hljs-number">16</span>, <span class="hljs-string">&quot;hysteresis&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;min_loss_scale&quot;</span>: <span class="hljs-number">1</span> }, <span class="hljs-string">&quot;optimizer&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;AdamW&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;betas&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;eps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;weight_decay&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;scheduler&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;WarmupLR&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;warmup_min_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_max_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_num_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span 
class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_optimizer&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: true, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: true, <span class="hljs-string">&quot;sub_group_size&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_max_live_parameters&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_max_reuse_distance&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span>: true }, <span class="hljs-string">&quot;gradient_accumulation_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;gradient_clipping&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: false } EOT<!-- HTML_TAG_END --></pre></div> <p>If the training script is in a normal file and not in the notebook cells, you can launch <code>deepspeed</code> normally via shell from a cell. 
For example, to use <code>run_translation.py</code> you would launch it with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->!git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>or with <code>%%bash</code> magic, where you can write multi-line code for the shell program to run:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->%%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>In that case you don’t need any of the code presented at the beginning of this section.</p> <p>Note: while the <code>%%bash</code> magic is neat, it currently buffers the output, so you won’t see the logs until the process completes.</p> <a id="deepspeed-config"></a> <h3 class="relative group"><a id="configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuration </span></h3> <p>For the complete guide to the DeepSpeed configuration options that can be used in its configuration file please refer to the <a href="https://www.deepspeed.ai/docs/config-json/" rel="nofollow">following documentation</a>.</p> <p>You can find dozens of DeepSpeed configuration examples that address various practical needs in <a href="https://github.com/microsoft/DeepSpeedExamples" rel="nofollow">the DeepSpeedExamples repo</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeedExamples <span class="hljs-built_in">cd</span> DeepSpeedExamples find . -name <span class="hljs-string">&#x27;*json&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Continuing the code from above, let’s say you’re looking to configure the Lamb optimizer. 
So you can search through the example <code>.json</code> files with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->grep -i Lamb $(find . -name <span class="hljs-string">&#x27;*json&#x27;</span>)<!-- HTML_TAG_END --></pre></div> <p>Some more examples are to be found in the <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">main repo</a> as well.</p> <p>When using DeepSpeed you always need to supply a DeepSpeed configuration file, yet some configuration parameters have to be configured via the command line. You will find the nuances in the rest of this guide.</p> <p>To get an idea of what DeepSpeed configuration file looks like, here is one that activates ZeRO stage 2 features, including optimizer states cpu offload, uses <code>AdamW</code> optimizer and <code>WarmupLR</code> scheduler and will enable mixed precision training if <code>--fp16</code> is passed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>When you execute the program, DeepSpeed will log the configuration it received from the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> to the console, so you can see exactly what was the final configuration passed to it.</p> <a id="deepspeed-config-passing"></a> <h3 class="relative group"><a id="passing-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#passing-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Passing Configuration </span></h3> <p>As discussed in this document normally the DeepSpeed configuration is passed as a path to a json file, but if you’re not using the command line interface to configure the training, and instead instantiate the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> via <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> then for the <code>deepspeed</code> argument you can pass a nested <code>dict</code>. This allows you to create the configuration on the fly and doesn’t require you to write it to the file system before passing it to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</p> <p>To summarize you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;/path/to/ds_config.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->ds_config_dict = <span class="hljs-built_in">dict</span>(scheduler=scheduler_params, optimizer=optimizer_params) TrainingArguments(..., deepspeed=ds_config_dict)<!-- HTML_TAG_END --></pre></div> <a id="deepspeed-config-shared"></a> <h3 class="relative group"><a id="shared-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#shared-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Shared Configuration </span></h3> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This section is a must-read</p></div> <p>Some configuration values are required by both the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> and DeepSpeed to function correctly; therefore, to prevent conflicting definitions, which could lead to hard-to-detect errors, we chose to configure those via the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments.</p> <p>Additionally, some configuration values are derived automatically based on the model’s configuration, so instead of remembering to manually adjust multiple values, it’s best to let the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> do the majority of configuration for you.</p> <p>Therefore, in the rest of this guide you will find a special configuration value: <code>auto</code>, which when set will be automatically replaced with the correct or most efficient value. Feel free to ignore this recommendation and set the values explicitly, in which case be very careful that your <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> arguments and DeepSpeed configurations agree. For example, are you using the same learning rate, batch size, and gradient accumulation settings? If these mismatch, the training may fail in very hard-to-detect ways. You have been warned.</p> <p>There are multiple other values that are DeepSpeed-specific, and those you will have to set manually to suit your needs.</p> <p>In your own programs, you can also use the following approach if you’d like to use the DeepSpeed config as the master configuration and derive the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> from it, as outlined below.
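</p> <p>A minimal sketch of that pattern follows; the file name and the concrete batch-size numbers are placeholders, not recommendations:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START --># A sketch only: load a DeepSpeed config, treat it as the master configuration,
# and derive the matching TrainingArguments values from it so the two sides agree.
import json
from transformers import TrainingArguments

with open("ds_config_zero3.json") as f:  # placeholder file name
    ds_config = json.load(f)

micro_bs = 4    # per-GPU micro batch size, placeholder value
grad_accum = 8  # gradient accumulation steps, placeholder value

ds_config["train_micro_batch_size_per_gpu"] = micro_bs
ds_config["gradient_accumulation_steps"] = grad_accum

training_args = TrainingArguments(
    output_dir="output_dir",
    per_device_train_batch_size=micro_bs,
    gradient_accumulation_steps=grad_accum,
    deepspeed=ds_config,  # a nested dict works here, no need to write it back to disk
)<!-- HTML_TAG_END --></pre></div> <p>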
The steps are:</p> <ol><li>Create or load the DeepSpeed configuration to be used as a master configuration</li> <li>Create the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object based on these values</li></ol> <p>Do note that some values, such as <code>scheduler.params.total_num_steps</code> are calculated by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> during <code>train</code>, but you can of course do the math yourself.</p> <a id="deepspeed-zero"></a> <h3 class="relative group"><a id="zero" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO </span></h3> <p><a href="https://www.deepspeed.ai/tutorials/zero/" rel="nofollow">Zero Redundancy Optimizer (ZeRO)</a> is the workhorse of DeepSpeed. It supports 3 different levels (stages) of optimization. The first one is not quite interesting for scalability purposes, therefore this document focuses on stages 2 and 3. Stage 3 is further improved by the latest addition of ZeRO-Infinity. You will find more indepth information in the DeepSpeed documentation.</p> <p>The <code>zero_optimization</code> section of the configuration file is the most important part (<a href="https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training" rel="nofollow">docs</a>), since that is where you define which ZeRO stages you want to enable and how to configure them. You will find the explanation for each parameter in the DeepSpeed docs.</p> <p>This section has to be configured exclusively via DeepSpeed configuration - the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> provides no equivalent command line arguments.</p> <p>Note: currently DeepSpeed doesn’t validate parameter names, so if you misspell any, it’ll use the default setting for the parameter that got misspelled. 
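</p> <p>As a purely illustrative guard (this is not a DeepSpeed feature), you can diff the keys of your <code>zero_optimization</code> section against the spellings you intended to use:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START --># Illustrative only: catch silent misspellings in the zero_optimization section.
import json

expected = {
    "stage", "offload_optimizer", "offload_param", "allgather_partitions",
    "allgather_bucket_size", "overlap_comm", "reduce_scatter",
    "reduce_bucket_size", "contiguous_gradients", "round_robin_gradients",
}

with open("ds_config.json") as f:  # placeholder file name
    cfg = json.load(f)

unknown = set(cfg.get("zero_optimization", {})) - expected
if unknown:
    print("possibly misspelled ZeRO options:", sorted(unknown))<!-- HTML_TAG_END --></pre></div> <p>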
You can watch the DeepSpeed engine start up log messages to see what values it is going to use.</p> <a id="deepspeed-zero2-config"></a> <h4 class="relative group"><a id="zero2-config" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-config"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 Config </span></h4> <p>The following is an example of configuration for ZeRO stage 2:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p><strong>Performance tuning:</strong></p> <ul><li>enabling <code>offload_optimizer</code> should reduce GPU RAM usage (it requires <code>&quot;stage&quot;: 2</code>)</li> <li><code>&quot;overlap_comm&quot;: true</code> trades off increased GPU RAM usage for lower all-reduce latency. <code>overlap_comm</code> uses 4.5x the <code>allgather_bucket_size</code> and <code>reduce_bucket_size</code> values. So if they are set to 5e8, this requires a 9GB footprint (<code>5e8 x 2Bytes x 2 x 4.5</code>; see the sketch after this list). Therefore, if you have a GPU with 8GB or less RAM, to avoid getting OOM-errors you will need to reduce those parameters to about <code>2e8</code>, which would require 3.6GB. You will want to do the same on larger capacity GPUs as well, if you’re starting to hit OOM.</li> <li>when reducing these buffers you’re trading communication speed for more available GPU RAM. The smaller the buffer size is, the slower the communication gets, and the more GPU RAM will be available to other tasks. So if a bigger batch size is important, getting a slightly slower training time could be a good trade.</li></ul>
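<p>As a minimal sketch of the arithmetic used in the list above, here is the same rule of thumb expressed as a tiny helper (an estimate only, not an exact memory model):</p> <pre><code class="language-python"># overlap_comm needs roughly 4.5x each of the two buckets, in 2-byte (fp16) elements
def overlap_comm_footprint_gb(allgather_bucket_size, reduce_bucket_size, bytes_per_element=2):
    total_elements = allgather_bucket_size + reduce_bucket_size
    return total_elements * bytes_per_element * 4.5 / 1e9

print(overlap_comm_footprint_gb(5e8, 5e8))  # 9.0 -> the 9GB example above
print(overlap_comm_footprint_gb(2e8, 2e8))  # 3.6 -> the reduced 3.6GB example
</code></pre>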
<p>Additionally, <code>deepspeed==0.4.4</code> added a new option <code>round_robin_gradients</code> which you can enable with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;round_robin_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>This is a stage 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. 
Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism).</p> <a id="deepspeed-zero3-config"></a> <h4 class="relative group"><a id="zero3-config" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-config"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 Config </span></h4> <p>The following is an example of configuration for ZeRO stage 3:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>If you are getting OOMs because your model or activations don’t fit into the GPU memory and you have unutilized CPU memory, offloading the optimizer states and parameters to CPU memory with <code>&quot;device&quot;: &quot;cpu&quot;</code> may solve this limitation. If you don’t want to offload to CPU memory, use <code>none</code> instead of <code>cpu</code> for the <code>device</code> entry. Offloading to NVMe is discussed further down.</p> <p>Pinned memory is enabled with <code>pin_memory</code> set to <code>true</code>. This feature can improve the throughput at the cost of making less memory available to other processes. Pinned memory is set aside for the specific process that requested it and it’s typically accessed much faster than normal CPU memory.</p> <p><strong>Performance tuning:</strong></p> <ul><li><code>stage3_max_live_parameters</code>: <code>1e9</code></li> <li><code>stage3_max_reuse_distance</code>: <code>1e9</code></li></ul> <p>If hitting OOM, reduce <code>stage3_max_live_parameters</code> and <code>stage3_max_reuse_distance</code>. They should have minimal impact on performance unless you are doing activation checkpointing. <code>1e9</code> would consume ~2GB. The memory is shared by <code>stage3_max_live_parameters</code> and <code>stage3_max_reuse_distance</code>, so it’s not additive, it’s just 2GB total.</p>
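<p>For example, a minimal sketch of what such a reduction could look like in the configuration file (the <code>5e8</code> values are only an illustration, not a recommendation):</p> <pre><code class="language-json">{
    "zero_optimization": {
        "stage3_max_live_parameters": 5e8,
        "stage3_max_reuse_distance": 5e8
    }
}
</code></pre>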
<p><code>stage3_max_live_parameters</code> is the upper limit on how many full parameters you want to keep on the GPU at any given time. “Reuse distance” is a metric we use to figure out when a parameter will be used again in the future, and we use the <code>stage3_max_reuse_distance</code> to decide whether to throw away the parameter or to keep it. If a parameter is going to be used again in the near future (less than <code>stage3_max_reuse_distance</code>) then we keep it to reduce communication overhead. This is especially helpful when you have activation checkpointing enabled, where we do a forward recompute and backward pass at a single layer granularity and want to keep the parameter available during the forward recompute until the backward pass is done.</p> <p>The following configuration values depend on the model’s hidden size:</p> <ul><li><code>reduce_bucket_size</code>: <code>hidden_size*hidden_size</code></li> <li><code>stage3_prefetch_bucket_size</code>: <code>0.9 * hidden_size * hidden_size</code></li> <li><code>stage3_param_persistence_threshold</code>: <code>10 * hidden_size</code></li></ul> <p>Therefore, set these values to <code>auto</code> and the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically assign the recommended values. But, of course, feel free to set these explicitly as well.</p> <p><code>stage3_gather_16bit_weights_on_model_save</code> enables model fp16 weights consolidation when the model gets saved. With large models and multiple GPUs this is an expensive operation both in terms of memory and speed. It’s currently required if you plan to resume the training. Watch out for future updates that will remove this limitation and make things more flexible.</p> <p>If you’re migrating from a ZeRO-2 configuration, note that the <code>allgather_partitions</code>, <code>allgather_bucket_size</code> and <code>reduce_scatter</code> configuration parameters are not used in ZeRO-3. If you keep these in the config file they will just be ignored.</p> <ul><li><code>sub_group_size</code>: <code>1e9</code></li></ul> <p><code>sub_group_size</code> controls the granularity in which parameters are updated during optimizer steps. Parameters are grouped into buckets of <code>sub_group_size</code> and each bucket is updated one at a time. When used with NVMe offload in ZeRO-Infinity, <code>sub_group_size</code> therefore controls the granularity in which model states are moved in and out of CPU memory from NVMe during the optimizer step. This prevents running out of CPU memory for extremely large models.</p> <p>You can leave <code>sub_group_size</code> at its default value of <em>1e9</em> when not using NVMe offload. 
You may want to change its default value in the following cases:</p> <ol><li>Running into OOM during the optimizer step: reduce <code>sub_group_size</code> to reduce memory utilization of temporary buffers.</li> <li>The optimizer step is taking a long time: increase <code>sub_group_size</code> to improve bandwidth utilization as a result of the increased data buffers.</li></ol> <a id="deepspeed-nvme"></a> <h3 class="relative group"><a id="nvme-support" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nvme-support"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NVMe Support </span></h3> <p>ZeRO-Infinity allows for training incredibly large models by extending GPU and CPU memory with NVMe memory. Thanks to smart partitioning and tiling algorithms, each GPU needs to send and receive only very small amounts of data during offloading, so modern NVMe drives prove to be a good fit for extending the total memory pool available to your training process. 
ZeRO-Infinity requires ZeRO-3 enabled.</p> <p>The following configuration example enables NVMe to offload both optimizer states and the params:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">4</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;fast_init&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;max_in_cpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;aio&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;block_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">262144</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;queue_depth&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">32</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;thread_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;single_submit&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_events&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>You can choose to offload both optimizer states and params to NVMe, or just one of them or none. 
For example, if you have copious amounts of CPU memory available, by all means offload to CPU memory only as it’d be faster (hint: <em>“device”: “cpu”</em>).</p> <p>Here is the full documentation for offloading <a href="https://www.deepspeed.ai/docs/config-json/#optimizer-offloading" rel="nofollow">optimizer states</a> and <a href="https://www.deepspeed.ai/docs/config-json/#parameter-offloading" rel="nofollow">parameters</a>.</p> <p>Make sure that your <code>nvme_path</code> is actually an NVMe, since it will work with the normal hard drive or SSD, but it’ll be much much slower. The fast scalable training was designed with modern NVMe transfer speeds in mind (as of this writing one can have ~3.5GB/s read, ~3GB/s write peak speeds).</p> <p>In order to figure out the optimal <code>aio</code> configuration block you must run a benchmark on your target setup, as <a href="https://github.com/microsoft/DeepSpeed/issues/998" rel="nofollow">explained here</a>.</p> <a id="deepspeed-zero2-zero3-performance"></a> <h4 class="relative group"><a id="zero2-vs-zero3-performance" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-vs-zero3-performance"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 vs ZeRO-3 Performance </span></h4> <p>ZeRO-3 is likely to be slower than ZeRO-2 if everything else is configured the same because the former has to gather model weights in addition to what ZeRO-2 does. If ZeRO-2 meets your needs and you don’t need to scale beyond a few GPUs then you may choose to stick to it. It’s important to understand that ZeRO-3 enables a much higher scalability capacity at a cost of speed.</p> <p>It’s possible to adjust ZeRO-3 configuration to make it perform closer to ZeRO-2:</p> <ul><li>set <code>stage3_param_persistence_threshold</code> to a very large number - larger than the largest parameter, e.g., <code>6 * hidden_size * hidden_size</code>. This will keep the parameters on the GPUs.</li> <li>turn off <code>offload_params</code> since ZeRO-2 doesn’t have that option.</li></ul> <p>The performance will likely improve significantly with just <code>offload_params</code> turned off, even if you don’t change <code>stage3_param_persistence_threshold</code>. Of course, these changes will impact the size of the model you can train. 
So these help you to trade scalability for speed depending on your needs.</p> <a id="deepspeed-zero2-example"></a> <h4 class="relative group"><a id="zero2-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 Example </span></h4> <p>Here is a full ZeRO-2 auto-configuration file <code>ds_config_zero2.json</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Here is a full ZeRO-2 all-enabled manually set configuration file. 
It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple <code>auto</code> settings in it.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">500</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <a id="deepspeed-zero3-example"></a> <h4 class="relative group"><a id="zero3-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 Example </span></h4> <p>Here is a full ZeRO-3 auto-configuration file <code>ds_config_zero3.json</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> 
<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Here is a full ZeRO-3 all-enabled manually set configuration file. 
It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple `auto` settings in it.

```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },

    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": 3e-5,
            "betas": [0.8, 0.999],
            "eps": 1e-8,
            "weight_decay": 3e-7
        }
    },

    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": 0,
            "warmup_max_lr": 3e-5,
            "warmup_num_steps": 500
        }
    },

    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": 1e6,
        "stage3_prefetch_bucket_size": 0.94e6,
        "stage3_param_persistence_threshold": 1e4,
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": true
    },

    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
```

### Optimizer and Scheduler

As long as you don't enable `offload_optimizer` you can mix and match DeepSpeed and HuggingFace schedulers and optimizers, with the exception of using the combination of HuggingFace scheduler and DeepSpeed optimizer:

| Combos       | HF Scheduler | DS Scheduler |
|--------------|--------------|--------------|
| HF Optimizer | Yes          | Yes          |
| DS Optimizer | No           | Yes          |

It is possible to use a non-DeepSpeed optimizer when `offload_optimizer` is enabled, as long as it has both CPU and GPU implementation (except LAMB).

<a id="deepspeed-optimizer"></a>
#### Optimizer

DeepSpeed's main optimizers are Adam, AdamW, OneBitAdam, and Lamb. These have been thoroughly tested with ZeRO and are thus recommended to be used. It can, however, import other optimizers from `torch`. The full documentation is [here](https://www.deepspeed.ai/docs/config-json/#optimizer-parameters).

If you don't configure the `optimizer` entry in the configuration file, the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically set it to `AdamW` and will use the supplied values or the defaults for the following command line arguments: `--learning_rate`, `--adam_beta1`, `--adam_beta2`, `--adam_epsilon` and `--weight_decay`.

Here is an example of the auto-configured `optimizer` entry for `AdamW`:

```json
{
   "optimizer": {
       "type": "AdamW",
       "params": {
         "lr": "auto",
         "betas": "auto",
         "eps": "auto",
         "weight_decay": "auto"
       }
   }
}
```

Note that the command line arguments will set the values in the configuration file. This is so that there is one definitive source of the values and to avoid hard-to-find errors when, for example, the learning rate is set to different values in different places. Command line rules. The values that get overridden are:

- `lr` with the value of `--learning_rate`
- `betas` with the value of `--adam_beta1 --adam_beta2`
- `eps` with the value of `--adam_epsilon`
- `weight_decay` with the value of `--weight_decay`

Therefore please remember to tune the shared hyperparameters on the command line.
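For instance, with the `auto` entry above you would typically pass the shared hyperparameters through the training arguments. A minimal sketch (the output directory and config file name are placeholders):

```python
from transformers import TrainingArguments

# Hypothetical values: with the "auto" optimizer entry above, these arguments
# are what end up in the DeepSpeed config at launch time.
args = TrainingArguments(
    output_dir="output_dir",           # placeholder
    deepspeed="ds_config_zero3.json",  # placeholder path to the config above
    learning_rate=3e-5,                # -> optimizer.params.lr
    adam_beta1=0.8,                    # -> optimizer.params.betas[0]
    adam_beta2=0.999,                  # -> optimizer.params.betas[1]
    adam_epsilon=1e-8,                 # -> optimizer.params.eps
    weight_decay=3e-7,                 # -> optimizer.params.weight_decay
)
```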
You can also set the values explicitly:

```json
{
   "optimizer": {
       "type": "AdamW",
       "params": {
         "lr": 0.001,
         "betas": [0.8, 0.999],
         "eps": 1e-8,
         "weight_decay": 3e-7
       }
   }
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.

If you want to use another optimizer which is not listed above, you will have to add it to the top level configuration:

```json
{
   "zero_allow_untested_optimizer": true
}
```

Similarly to `AdamW`, you can configure other officially supported optimizers. Just remember that they may have different config values, e.g. for Adam you will want `weight_decay` around `0.01`.
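As an illustration only (the values are assumptions, not tuned recommendations), here is a sketch of configuring an `Adam` entry, with the config passed as a Python dict rather than a json file:

```python
from transformers import TrainingArguments

# Sketch: an officially supported optimizer other than AdamW, configured explicitly.
# The values below are illustrative assumptions, not recommendations.
ds_config = {
    "optimizer": {
        "type": "Adam",
        "params": {"lr": 3e-5, "betas": [0.9, 0.999], "eps": 1e-8, "weight_decay": 0.01},
    },
    # other entries (fp16, zero_optimization, ...) omitted for brevity
}

# TrainingArguments also accepts the DeepSpeed config as an already-loaded dict
# instead of a path to a json file.
args = TrainingArguments(output_dir="output_dir", deepspeed=ds_config)
```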
<a id="deepspeed-scheduler"></a>

#### Scheduler

DeepSpeed supports `LRRangeTest`, `OneCycle`, `WarmupLR` and `WarmupDecayLR` learning rate schedulers. The full documentation is [here](https://www.deepspeed.ai/docs/config-json/#scheduler-parameters).

Here is where the schedulers overlap between 🤗 Transformers and DeepSpeed:

- `WarmupLR` via `--lr_scheduler_type constant_with_warmup`
- `WarmupDecayLR` via `--lr_scheduler_type linear`. This is also the default value for `--lr_scheduler_type`, therefore, if you don't configure the scheduler, this is the scheduler that will get configured by default.

If you don't configure the `scheduler` entry in the configuration file, the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will use the values of `--lr_scheduler_type`, `--learning_rate` and `--warmup_steps` or `--warmup_ratio` to configure a 🤗 Transformers version of it.

Here is an example of the auto-configured `scheduler` entry for `WarmupLR`:

```json
{
   "scheduler": {
         "type": "WarmupLR",
         "params": {
             "warmup_min_lr": "auto",
             "warmup_max_lr": "auto",
             "warmup_num_steps": "auto"
         }
     }
}
```
Since *"auto"* is used the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) arguments will set the correct values in the configuration file. This is so that there is one definitive source of the values and to avoid hard-to-find errors when, for example, the learning rate is set to different values in different places. Command line rules. The values that get set are:

- `warmup_min_lr` with the value of `0`.
- `warmup_max_lr` with the value of `--learning_rate`.
- `warmup_num_steps` with the value of `--warmup_steps` if provided. Otherwise it will use `--warmup_ratio` multiplied by the number of training steps and rounded up (see the sketch below).
- `total_num_steps` with either the value of `--max_steps` or, if it is not provided, derived automatically at run time based on the environment and the size of the dataset and other command line arguments (needed for `WarmupDecayLR`).
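For the `warmup_num_steps` derivation, the computation amounts to the following (a sketch with made-up numbers):

```python
import math

# Hypothetical values mirroring the derivation described above.
warmup_steps = 0             # --warmup_steps (0 means "not provided")
warmup_ratio = 0.1           # --warmup_ratio
total_training_steps = 1176  # derived from dataset size, batch size, epochs, ...

warmup_num_steps = warmup_steps if warmup_steps > 0 else math.ceil(total_training_steps * warmup_ratio)
print(warmup_num_steps)  # 118
```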
You can, of course, take over any or all of the configuration values and set those yourself:

```json
{
   "scheduler": {
         "type": "WarmupLR",
         "params": {
             "warmup_min_lr": 0,
             "warmup_max_lr": 0.001,
             "warmup_num_steps": 1000
         }
     }
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.

For example, for `WarmupDecayLR`, you can use the following entry:

```json
{
   "scheduler": {
         "type": "WarmupDecayLR",
         "params": {
             "last_batch_iteration": -1,
             "total_num_steps": "auto",
             "warmup_min_lr": "auto",
             "warmup_max_lr": "auto",
             "warmup_num_steps": "auto"
         }
     }
}
```

and `total_num_steps`, `warmup_min_lr`, `warmup_max_lr` and `warmup_num_steps` will be set at loading time.
<a id="deepspeed-fp32"></a>

### fp32 Precision

Deepspeed supports the full fp32 and the fp16 mixed precision.

Because of the much reduced memory needs and faster speed one gets with the fp16 mixed precision, the only time you will want to not use it is when the model you're using doesn't behave well under this training mode. Typically this happens when the model wasn't pretrained in the fp16 mixed precision (e.g. this often happens with bf16-pretrained models). Such models may overflow or underflow, leading to `NaN` loss. If this is your case then you will want to use the full fp32 mode, by explicitly disabling the otherwise default fp16 mixed precision mode with:

```json
{
    "fp16": {
        "enabled": false
    }
}
```

If you're using an Ampere-architecture based GPU, pytorch version 1.7 and higher will automatically switch to using the much more efficient tf32 format for some operations, but the results will still be in fp32. For details and benchmarks, please, see [TensorFloat-32(TF32) on Ampere devices](https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices). The document includes instructions on how to disable this automatic conversion if for some reason you prefer not to use it.

With the 🤗 Trainer you can use `--tf32` to enable it, or disable it with `--tf32 0` or `--no_tf32`. By default the PyTorch default is used.
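If you prefer to control tf32 outside of the Trainer flag, these are the PyTorch-level switches (a sketch; whether you want TF32 on at all is workload-dependent):

```python
import torch

# PyTorch's global switches for TF32; they only take effect on Ampere (and newer) GPUs.
torch.backends.cuda.matmul.allow_tf32 = True  # TF32 for matrix multiplications
torch.backends.cudnn.allow_tf32 = True        # TF32 for cuDNN convolutions
```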
<a id="deepspeed-amp"></a>

### Automatic Mixed Precision

You can use automatic mixed precision with either a pytorch-like AMP way or the apex-like way:

### fp16

To configure pytorch AMP-like mode with fp16 (float16) set:
```json
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    }
}
```

and the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically enable or disable it based on the value of `args.fp16_backend`. The rest of the config values are up to you.

This mode gets enabled when `--fp16 --fp16_backend amp` or `--fp16_full_eval` command line args are passed.
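In a script, the same mode is typically switched on through the training arguments rather than raw command line flags; a minimal sketch (paths are placeholders):

```python
from transformers import TrainingArguments

# Sketch: with "enabled": "auto" in the config above, fp16=True turns fp16 mixed precision on.
args = TrainingArguments(
    output_dir="output_dir",           # placeholder
    deepspeed="ds_config_zero3.json",  # placeholder path to the config above
    fp16=True,
)
```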
You can also enable/disable this mode explicitly:

```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    }
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.

Here is the [documentation](https://www.deepspeed.ai/docs/config-json/#fp16-training-options).

### bf16

If bf16 (bfloat16) is desired instead of fp16 then the following configuration section is to be used:

```json
{
    "bf16": {
        "enabled": "auto"
    }
}
```
bf16 has the same dynamic range as fp32 and thus doesn't require loss scaling.

This mode gets enabled when `--bf16` or `--bf16_full_eval` command line args are passed.

You can also enable/disable this mode explicitly:

```json
{
    "bf16": {
        "enabled": true
    }
}
```

<Tip>

As of `deepspeed==0.6.0` the bf16 support is new and experimental.

If you use [gradient accumulation](#gradient-accumulation) with bf16 enabled, you need to be aware that it will accumulate gradients in bf16, which may not be what you want due to this format's low precision, as it may lead to a lossy accumulation.

</Tip>
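To make the lossy-accumulation caveat concrete, here is a small, self-contained sketch (illustrative only, not part of any DeepSpeed API) that accumulates many tiny contributions in bf16 versus fp32:

```python
import torch

# Repeatedly add a small value, the way gradient accumulation repeatedly adds small gradients.
total_fp32 = torch.tensor(0.0)
total_bf16 = torch.tensor(0.0, dtype=torch.bfloat16)

for _ in range(10_000):
    total_fp32 += 1e-4
    total_bf16 += 1e-4

print(total_fp32)  # ~1.0
print(total_bf16)  # far smaller: once the running sum grows, 1e-4 rounds away at bf16 precision
```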
### apex

To configure apex AMP-like mode set:

```json
"amp": {
    "enabled": "auto",
    "opt_level": "auto"
}
```

and the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically configure it based on the values of `args.fp16_backend` and `args.fp16_opt_level`.

This mode gets enabled when `--fp16 --fp16_backend apex --fp16_opt_level O1` command line args are passed.

You can also configure this mode explicitly:
```json
{
    "amp": {
        "enabled": true,
        "opt_level": "O1"
    }
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.

Here is the [documentation](https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options).

<a id="deepspeed-bs"></a>

### Batch Size

To configure batch size, use:

```json
{
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto"
}
```
and the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically set `train_micro_batch_size_per_gpu` to the value of `args.per_device_train_batch_size` and `train_batch_size` to `args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps`.
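To make the arithmetic concrete, here is how the two `auto` values are derived (a sketch with made-up numbers):

```python
# Hypothetical values mirroring the derivation described above.
world_size = 2                    # number of GPUs / processes
per_device_train_batch_size = 4   # --per_device_train_batch_size
gradient_accumulation_steps = 3   # --gradient_accumulation_steps

train_micro_batch_size_per_gpu = per_device_train_batch_size
train_batch_size = world_size * per_device_train_batch_size * gradient_accumulation_steps
print(train_micro_batch_size_per_gpu, train_batch_size)  # 4 24
```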
You can also set the values explicitly:

```json
{
    "train_batch_size": 12,
    "train_micro_batch_size_per_gpu": 4
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.

<a id="deepspeed-grad-acc"></a>

### Gradient Accumulation

To configure gradient accumulation set:

```json
{
    "gradient_accumulation_steps": "auto"
}
```

and the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically set it to the value of `args.gradient_accumulation_steps`.

You can also set the value explicitly:

```json
{
    "gradient_accumulation_steps": 3
}
```

But then you're on your own synchronizing the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) command line arguments and the DeepSpeed configuration.
<a id="deepspeed-grad-clip"></a>

### Gradient Clipping

To configure gradient clipping set:

```json
{
    "gradient_clipping": "auto"
}
```

and the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) will automatically set it to the value of `args.max_grad_norm`.

You can also set the value explicitly:

```json
{
    "gradient_clipping": 1.0
}
```
bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1.0</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <a id="deepspeed-weight-extraction"></a> <h3 class="relative group"><a id="getting-the-model-weights-out" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#getting-the-model-weights-out"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Getting The Model Weights Out </span></h3> <p>As long as you continue training and resuming using DeepSpeed you don’t need to worry about anything. DeepSpeed stores fp32 master weights in its custom checkpoint optimizer files, which are <code>global_step*/*optim_states.pt</code> (this is glob pattern), and are saved under the normal checkpoint.</p> <p><strong>FP16 Weights:</strong></p> <p>When a model is saved under ZeRO-2, you end up having the normal <code>pytorch_model.bin</code> file with the model weights, but they are only the fp16 version of the weights.</p> <p>Under ZeRO-3, things are much more complicated, since the model weights are partitioned out over multiple GPUs, therefore <code>&quot;stage3_gather_16bit_weights_on_model_save&quot;: true</code> is required to get the <code>Trainer</code> to save the fp16 version of the weights. If this setting is <code>False</code> <code>pytorch_model.bin</code> won’t be created. This is because by default DeepSpeed’s <code>state_dict</code> contains a placeholder and not the real weights. 
If we were to save this <code>state_dict</code> it won’t be possible to load it back.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p><strong>FP32 Weights:</strong></p> <p>While the fp16 weights are fine for resuming training, if you finished finetuning your model and want to upload it to the <a href="https://huggingface.co/models" rel="nofollow">models hub</a> or pass it to someone else you most likely will want to get the fp32 weights. This ideally shouldn’t be done during training since this is a process that requires a lot of memory, and therefore best to be performed offline after the training is complete. But if desired and you have plenty of free CPU memory it can be done in the same training script. 
The following sections will discuss both approaches.</p> <p><strong>Live FP32 Weights Recovery:</strong></p> <p>This approach may not work if your model is large and you have little free CPU memory left at the end of the training.</p> <p>If you have saved at least one checkpoint, and you want to use the latest one, you can do the following:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.trainer_utils <span class="hljs-keyword">import</span> get_last_checkpoint <span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)<!-- HTML_TAG_END --></pre></div> <p>If you’re using the <code>--load_best_model_at_end</code> <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> argument (to track the best checkpoint), then you can finish the training by first saving the final model explicitly and then doing the same as above:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span
class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = os.path.join(trainer.args.output_dir, <span class="hljs-string">&quot;checkpoint-final&quot;</span>) trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Note, that once <code>load_state_dict_from_zero_checkpoint</code> was run, the <code>model</code> will no longer be useable in the DeepSpeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since <code>model.load_state_dict(state_dict)</code> will remove all the DeepSpeed magic from it. So do this only at the very end of the training.</p></div> <p>Of course, you don’t have to use class:<em>~transformers.Trainer</em> and you can adjust the examples above to your own trainer.</p> <p>If for some reason you want more refinement, you can also extract the fp32 <code>state_dict</code> of the weights and apply these yourself as is shown in the following example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) <span class="hljs-comment"># already on cpu</span> model = model.cpu() model.load_state_dict(state_dict)<!-- HTML_TAG_END --></pre></div> <p><strong>Offline FP32 Weights Recovery:</strong></p> <p>DeepSpeed creates a special conversion script <code>zero_to_fp32.py</code> which it places in the top-level of the checkpoint folder. Using this script you can extract the weights at any point. 
The script is standalone and you no longer need to have the configuration file or a <code>Trainer</code> to do the extraction.</p> <p>Let’s say your checkpoint folder looks like this:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ <span class="hljs-built_in">ls</span> -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt -rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py*<!-- HTML_TAG_END --></pre></div> <p>In this example there is just one DeepSpeed checkpoint sub-folder <em>global_step1</em>. Therefore to reconstruct the fp32 weights just run:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python zero_to_fp32.py . 
pytorch_model.bin<!-- HTML_TAG_END --></pre></div> <p>This is it. <code>pytorch_model.bin</code> will now contain the full fp32 model weights consolidated from multiple GPUs.</p> <p>The script will automatically be able to handle either a ZeRO-2 or ZeRO-3 checkpoint.</p> <p><code>python zero_to_fp32.py -h</code> will give you usage details.</p> <p>The script will auto-discover the deepspeed sub-folder using the contents of the file <code>latest</code>, which in the current example will contain <code>global_step1</code>.</p> <p>Note: currently the script requires 2x general RAM of the final fp32 model weights.</p> <h3 class="relative group"><a id="zero3-and-infinity-nuances" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-and-infinity-nuances"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 and Infinity Nuances </span></h3> <p>ZeRO-3 is quite different from ZeRO-2 because of its param sharding feature.</p> <p>ZeRO-Infinity further extends ZeRO-3 to support NVMe memory and multiple other speed and scalability improvements.</p> <p>While all the efforts were made for things to just work without needing any special changes to your models, in certain circumstances you may find the following information to be needed.</p> <h4 class="relative group"><a id="constructing-massive-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#constructing-massive-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Constructing Massive Models </span></h4> <p>DeepSpeed/ZeRO-3 can handle models with Trillions of parameters which may not fit onto the existing RAM. 
In such cases, but also if you want the initialization to happen much faster, initialize the model using <em>deepspeed.zero.Init()</em> context manager (which is also a function decorator), like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration, T5Config <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">with</span> deepspeed.zero.Init(): config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration(config)<!-- HTML_TAG_END --></pre></div> <p>As you can see this gives you a randomly initialized model.</p> <p>If you want to use a pretrained model, <code>model_class.from_pretrained</code> will activate this feature as long as <code>is_deepspeed_zero3_enabled()</code> returns <code>True</code>, which currently is setup by the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object if the passed DeepSpeed configuration file contains ZeRO-3 config section. Thus you must create the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object <strong>before</strong> calling <code>from_pretrained</code>. 
Here is an example of a possible sequence:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) trainer = Trainer(model=model, args=training_args, ...)<!-- HTML_TAG_END --></pre></div> <p>If you’re using the official example scripts and your command line arguments include <code>--deepspeed ds_config.json</code> with ZeRO-3 config enabled, then everything is already done for you, since this is how example scripts are written.</p> <p>Note: If the fp16 weights of the model can’t fit onto the memory of a single GPU this feature must be used.</p> <p>For full details on this method and other related features please refer to <a href="https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models" rel="nofollow">Constructing Massive Models</a>.</p> <p>Also when loading fp16-pretrained models, you will want to tell <code>from_pretrained</code> to use <code>torch_dtype=torch.float16</code>. 
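For example, a minimal sketch (the checkpoint name below is just a placeholder for whichever fp16-pretrained model you are actually loading):</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->from transformers import AutoModel
import torch

# the model name is a placeholder - substitute your own fp16-pretrained checkpoint
model = AutoModel.from_pretrained(&quot;your-fp16-checkpoint&quot;, torch_dtype=torch.float16)<!-- HTML_TAG_END --></pre></div> <p>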
For details, please, see <a href="#from_pretrained-torch-dtype">from_pretrained-torch-dtype</a>.</p> <h4 class="relative group"><a id="gathering-parameters" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gathering-parameters"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Gathering Parameters </span></h4> <p>Under ZeRO-3 on multiple GPUs no single GPU has all the parameters unless it’s the parameters for the currently executing layer. So if you need to access all parameters from all layers at once there is a specific method to do it. Most likely you won’t need it, but if you do please refer to <a href="https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination" rel="nofollow">Gathering Parameters</a></p> <p>We do however use it internally in several places, one such example is when loading pretrained model weights in <code>from_pretrained</code>. We load one layer at a time and immediately partition it to all participating GPUs, as for very large models it won’t be possible to load it on one GPU and then spread it out to multiple GPUs, due to memory limitations.</p> <p>Also under ZeRO-3, if you write your own code and run into a model parameter weight that looks like:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tensor([<span class="hljs-number">1.0</span>], device=<span class="hljs-string">&quot;cuda:0&quot;</span>, dtype=torch.float16, requires_grad=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>stress on <code>tensor([1.])</code>, or if you get an error 
where it says the parameter is of size <code>1</code>, instead of some much larger multi-dimensional shape, this means that the parameter is partitioned and what you see is a ZeRO-3 placeholder.</p> <a id="deepspeed-zero-inference"></a> <h3 class="relative group"><a id="zero-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO Inference </span></h3> <p>ZeRO Inference uses the same config as ZeRO-3 Training. You just don’t need the optimizer and scheduler sections. In fact you can leave these in the config file if you want to share the same one with the training. They will just be ignored.</p> <p>Otherwise you just need to pass the usual <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> arguments. For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --do_eval --deepspeed ds_config.json<!-- HTML_TAG_END --></pre></div> <p>The only important thing is that you need to use a ZeRO-3 configuration, since ZeRO-2 provides no benefit whatsoever for inference: only ZeRO-3 shards the parameters, whereas ZeRO-1 and ZeRO-2 shard just the optimizer states and gradients, which helps training but not inference.</p> <p>Here is an example of running <code>run_translation.py</code> under DeepSpeed using all available GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ --overwrite_output_dir --per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix <span class="hljs-string">&quot;translate English to Romanian: &quot;</span><!-- HTML_TAG_END --></pre></div> <p>Since for inference there is no need for additional large memory used by the optimizer states and the gradients you should be able to fit much larger batches and/or sequence length onto the same hardware.</p> <p>Additionally DeepSpeed is currently developing a related product called Deepspeed-Inference which has no relationship to the ZeRO technology, but instead uses tensor parallelism to scale models that can’t fit onto a single GPU. 
This is a work in progress and we will provide the integration once that product is complete.</p> <h3 class="relative group"><a id="memory-requirements" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#memory-requirements"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Memory Requirements </span></h3> <p>Since Deepspeed ZeRO can offload memory to CPU (and NVMe) the framework provides utils that allow one to tell how much CPU and GPU memory will be needed depending on the number of GPUs being used.</p> <p>Let’s estimate how much memory is needed to finetune “bigscience/T0_3B” on a single GPU:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)&#x27;</span> [...] Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. 
per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0<!-- HTML_TAG_END --></pre></div> <p>So you can fit it on a single 80GB GPU with no CPU offload, or on a tiny 8GB GPU if you also have ~60GB of CPU memory. (Remember this is just the memory for params, optimizer states and gradients - you will need a bit more memory for CUDA kernels, activations and temporary buffers.)</p> <p>Then it’s a tradeoff of cost vs. speed. It’ll be cheaper to buy/rent a smaller GPU (or fewer GPUs, since Deepspeed ZeRO lets you use multiple GPUs). But then it’ll be slower, so even if you don’t care about how fast the job finishes, the slowdown directly increases how long you occupy the GPU and therefore the cost. So experiment and compare which setup works best.</p> <p>If you have enough GPU memory, make sure to disable the CPU/NVMe offload, as it’ll make everything faster.</p> <p>For example, let’s repeat the same for 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)&#x27;</span> [...] Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 2 GPUs per node. SW: Model with 2783M total params, 65M largest layer params.
per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 <!-- HTML_TAG_END --></pre></div> <p>So here you’d want 2x 32GB GPUs or higher without offloading to CPU.</p> <p>For full information please see <a href="https://deepspeed.readthedocs.io/en/latest/memory.html" rel="nofollow">memory estimators</a>.</p> <h3 class="relative group"><a id="filing-issues" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#filing-issues"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Filing Issues </span></h3> <p>Here is how to file an issue so that we could quickly get to the bottom of the issue and help you to unblock your work.</p> <p>In your report please always include:</p> <ol><li><p>the full Deepspeed config file in the report</p></li> <li><p>either the command line arguments if you were using the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> or <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> arguments if you were scripting the Trainer setup yourself. 
Please do not dump the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> as it has dozens of entries that are irrelevant.</p></li> <li><p>Output of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&#x27;import torch; print(f&quot;torch: {torch.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import transformers; print(f&quot;transformers: {transformers.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import deepspeed; print(f&quot;deepspeed: {deepspeed.__version__}&quot;)&#x27;</span><!-- HTML_TAG_END --></pre></div></li> <li><p>If possible include a link to a Google Colab notebook that we can reproduce the problem with. You can use this <a href="https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb" rel="nofollow">notebook</a> as a starting point.</p></li> <li><p>Unless it’s impossible, please always use a standard dataset that we can access, not something custom.</p></li> <li><p>If possible try to use one of the existing <a href="https://github.com/huggingface/transformers/tree/main/examples/pytorch" rel="nofollow">examples</a> to reproduce the problem with.</p></li></ol> <p>Things to consider:</p> <ul><li><p>Deepspeed is often not the cause of the problem.</p> <p>Some of the filed issues proved to be Deepspeed-unrelated. That is, once Deepspeed was removed from the setup, the problem was still there.</p> <p>Therefore, unless it’s absolutely obvious that the problem is DeepSpeed-related (as in, you can see an exception and DeepSpeed modules are involved), first re-test your setup without DeepSpeed in it. Only if the problem persists should you then mention Deepspeed and supply all the required details.</p></li> <li><p>If it’s clear to you that the issue is in the DeepSpeed core and not the integration part, please file the Issue directly with <a href="https://github.com/microsoft/DeepSpeed/" rel="nofollow">Deepspeed</a>.
If you aren’t sure, please do not worry, either Issue tracker will do, we will figure it out once you posted it and redirect you to another Issue tracker if need be.</p></li></ul> <h3 class="relative group"><a id="troubleshooting" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#troubleshooting"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Troubleshooting </span></h3> <h4 class="relative group"><a id="the-deepspeed-process-gets-killed-at-startup-without-a-traceback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#the-deepspeed-process-gets-killed-at-startup-without-a-traceback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>the <code>deepspeed</code> process gets killed at startup without a traceback </span></h4> <p>If the <code>deepspeed</code> process gets killed at launch time without a traceback, that usually means that the program tried to allocate more CPU memory than your system has or your process is allowed to allocate and the OS kernel killed that process. This is because your configuration file most likely has either <code>offload_optimizer</code> or <code>offload_param</code> or both configured to offload to <code>cpu</code>. If you have NVMe, experiment with offloading to NVMe if you’re running under ZeRO-3. 
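For example, a sketch of what the relevant ZeRO-3 offload sections could look like when pointed at NVMe instead of CPU (the <code>nvme_path</code> value is just a placeholder for your local NVMe mount):</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->{
    &quot;zero_optimization&quot;: {
        &quot;stage&quot;: 3,
        &quot;offload_optimizer&quot;: {
            &quot;device&quot;: &quot;nvme&quot;,
            &quot;nvme_path&quot;: &quot;/local_nvme&quot;
        },
        &quot;offload_param&quot;: {
            &quot;device&quot;: &quot;nvme&quot;,
            &quot;nvme_path&quot;: &quot;/local_nvme&quot;
        }
    }
}<!-- HTML_TAG_END --></pre></div> <p>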
Here is how you can <a href="https://deepspeed.readthedocs.io/en/latest/memory.html" rel="nofollow">estimate how much memory is needed for a specific model</a>.</p> <h4 class="relative group"><a id="training-andor-evalpredict-loss-is-nan" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#training-andor-evalpredict-loss-is-nan"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>training and/or eval/predict loss is <code>NaN</code></span></h4> <p>This often happens when one takes a model pre-trained in bf16 mixed precision mode and tries to use it under fp16 (with or without mixed precision). Most models trained on TPU and often the ones released by Google are in this category (e.g. almost all t5-based models). Here the solution is to either use fp32 or bf16 if your hardware supports it (TPU, Ampere GPUs or newer).</p> <p>The other problem may have to do with using fp16. When you configure this section:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and you see in your log that Deepspeed reports <code>OVERFLOW!</code> as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;?, ?it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">262144</span> <span class="hljs-number">1</span>%|▌ | <span class="hljs-number">1</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">26</span>, <span class="hljs-number">2.17</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">131072.0</span> <span class="hljs-number">1</span>%|█▏ [...] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. 
Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">14</span>%|████████████████▌ | <span class="hljs-number">27</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.21</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|█████████████████▏ | <span class="hljs-number">28</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|█████████████████▊ | <span class="hljs-number">29</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">15</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> [...]<!-- HTML_TAG_END --></pre></div> <p>that means that the Deepspeed loss scaler can’t figure out a scaling co-efficient that overcomes loss overflow.</p> <p>(the log was massaged to be more readable here.)</p> <p>In this case you usually need to raise the value of <code>initial_scale_power</code>. 
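For example, a sketch of the same <code>fp16</code> section with only that value raised:</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->{
    &quot;fp16&quot;: {
        &quot;enabled&quot;: &quot;auto&quot;,
        &quot;loss_scale&quot;: 0,
        &quot;loss_scale_window&quot;: 1000,
        &quot;initial_scale_power&quot;: 32,
        &quot;hysteresis&quot;: 2,
        &quot;min_loss_scale&quot;: 1
    }
}<!-- HTML_TAG_END --></pre></div> <p>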
Setting it to <code>&quot;initial_scale_power&quot;: 32</code> will typically resolve the problem.</p> <h3 class="relative group"><a id="notes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#notes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Notes </span></h3> <ul><li>DeepSpeed works with the PyTorch <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> but not TF <code>TFTrainer</code>.</li> <li>While DeepSpeed has a pip installable PyPI package, it is highly recommended that it gets installed from <a href="https://github.com/microsoft/deepspeed#installation" rel="nofollow">source</a> to best match your hardware and also if you need to enable certain features, like 1-bit Adam, which aren’t available in the pypi distribution.</li> <li>You don’t have to use the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> to use DeepSpeed with 🤗 Transformers - you can use any model with your own trainer, and you will have to adapt the latter according to <a href="https://www.deepspeed.ai/getting-started/#writing-deepspeed-models" rel="nofollow">the DeepSpeed integration instructions</a>.</li></ul> <h2 class="relative group"><a id="nontrainer-deepspeed-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nontrainer-deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Non-Trainer Deepspeed Integration </span></h2> <p>The <a href="/docs/transformers/pr_19429/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig">HfDeepSpeedConfig</a> is used to integrate Deepspeed into the 🤗 Transformers core functionality, when <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> is not used. 
The only thing that it does is handling Deepspeed ZeRO-3 param gathering and automatically splitting the model onto multiple gpus during <code>from_pretrained</code> call. Everything else you have to do by yourself.</p> <p>When using <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> everything is automatically taken care of.</p> <p>When not using <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, to efficiently deploy DeepSpeed ZeRO-3, you must instantiate the <a href="/docs/transformers/pr_19429/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig">HfDeepSpeedConfig</a> object before instantiating the model and keep that object alive.</p> <p>If you’re using Deepspeed ZeRO-1 or ZeRO-2 you don’t need to use <code>HfDeepSpeedConfig</code> at all.</p> <p>For example for a pretrained model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> model = AutoModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)<!-- HTML_TAG_END --></pre></div> <p>or for non-pretrained model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoConfig <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)<!-- HTML_TAG_END --></pre></div> <p>Please note that if you’re not using the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> integration, you’re completely on your own. Basically follow the documentation on the <a href="https://www.deepspeed.ai/" rel="nofollow">Deepspeed</a> website. Also you have to configure explicitly the config file - you can’t use <code>&quot;auto&quot;</code> values and you will have to put real values instead.</p> <h2 class="relative group"><a id="transformers.deepspeed.HfDeepSpeedConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.deepspeed.HfDeepSpeedConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>HfDeepSpeedConfig </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.deepspeed.</span><span class="font-semibold">HfDeepSpeedConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.deepspeed.HfDeepSpeedConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/deepspeed.py#L45" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_file_or_dict<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.deepspeed.HfDeepSpeedConfig.config_file_or_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.deepspeed.HfDeepSpeedConfig.config_file_or_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_file_or_dict</strong> (<code>Union[str, Dict]</code>) &#x2014; path to DeepSpeed config file or dict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.</p> <p>A <code>weakref</code> of this object is stored in the module’s globals to be able to access the config from areas where things like the Trainer object is not available (e.g. <code>from_pretrained</code> and <code>_get_resized_embeddings</code>). Therefore it’s important that this object remains alive while the program is still running.</p> <p><a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> uses the <code>HfTrainerDeepSpeedConfig</code> subclass instead. That subclass has logic to sync the configuration with values of <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> by replacing special placeholder values: <code>&quot;auto&quot;</code>. Without this special logic the DeepSpeed configuration is not modified in any way.</p></div> <h3 class="relative group"><a id="custom-deepspeed-zero-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#custom-deepspeed-zero-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Custom DeepSpeed ZeRO Inference </span></h3> <p>Here is an example of how one could do DeepSpeed ZeRO Inference without using <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> when one can’t fit a model onto a single GPU. The solution includes using additional GPUs or/and offloading GPU memory to CPU memory.</p> <p>The important nuance to understand here is that the way ZeRO is designed you can process different inputs on different GPUs in parallel.</p> <p>The example has copious notes and is self-documenting.</p> <p>Make sure to:</p> <ol><li>disable CPU offload if you have enough GPU memory (since it slows things down)</li> <li>enable bf16 if you own an Ampere or a newer GPU to make things faster. If you don’t have that hardware you may enable fp16 as long as you don’t use any model that was pre-trained in bf16 mixed precision (such as most t5 models). 
These usually overflow in fp16 and you will see garbage as output.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment">#!/usr/bin/env python</span> <span class="hljs-comment"># This script demonstrates how to use Deepspeed ZeRO in an inference mode when one can&#x27;t fit a model</span> <span class="hljs-comment"># into a single GPU</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># 1. Use 1 GPU with CPU offload</span> <span class="hljs-comment"># 2. Or use multiple GPUs instead</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># First you need to install deepspeed: pip install deepspeed</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># Here we use a 3B &quot;bigscience/T0_3B&quot; model which needs about 15GB GPU RAM - so 1 largish or 2</span> <span class="hljs-comment"># small GPUs can handle it. or 1 small GPU and a lot of CPU memory.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To use a larger model like &quot;bigscience/T0&quot; which needs about 50GB, unless you have an 80GB GPU -</span> <span class="hljs-comment"># you will need 2-4 gpus. And then you can adapt the script to handle more gpus if you want to</span> <span class="hljs-comment"># process multiple inputs at once.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># The provided deepspeed config also activates CPU memory offloading, so chances are that if you</span> <span class="hljs-comment"># have a lot of available CPU memory and you don&#x27;t mind a slowdown you should be able to load a</span> <span class="hljs-comment"># model that doesn&#x27;t normally fit into a single GPU. 
If you have enough GPU memory the program will</span> <span class="hljs-comment"># run faster if you don&#x27;t want offload to CPU - so disable that section then.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 1 gpu:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 1 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=1 t0.py</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 2 gpus:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 2 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=2 t0.py</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoConfig, AutoModelForSeq2SeqLM <span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">import</span> os <span class="hljs-keyword">import</span> torch os.environ[<span class="hljs-string">&quot;TOKENIZERS_PARALLELISM&quot;</span>] = <span class="hljs-string">&quot;false&quot;</span> <span class="hljs-comment"># To avoid warnings about parallelism in tokenizers</span> <span class="hljs-comment"># distributed setup</span> local_rank = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>, <span class="hljs-string">&quot;0&quot;</span>)) world_size = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>, <span class="hljs-string">&quot;1&quot;</span>)) torch.cuda.set_device(local_rank) deepspeed.init_distributed() model_name = <span class="hljs-string">&quot;bigscience/T0_3B&quot;</span> config = AutoConfig.from_pretrained(model_name) model_hidden_size = config.d_model <span class="hljs-comment"># batch size has to be divisible by world_size, but can be bigger than world_size</span> train_batch_size = <span class="hljs-number">1</span> * world_size <span class="hljs-comment"># ds_config notes</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - enable bf16 if you use Ampere or higher GPU - this will run in mixed precision and will be</span> <span class="hljs-comment"># faster.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - for older GPUs you can enable fp16, but it&#x27;ll only work for non-bf16 pretrained models - e.g.</span> <span class="hljs-comment"># all official t5 models are bf16-pretrained</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - set offload_param.device to &quot;none&quot; or completely remove the `offload_param` section if you don&#x27;t</span> <span class="hljs-comment"># - want CPU offload</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - if using `offload_param` you can manually finetune stage3_param_persistence_threshold to control</span> <span class="hljs-comment"># - which params should remain on gpus - the larger the value the smaller the offload size</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># For indepth info on Deepspeed config see</span> <span class="hljs-comment"># https://huggingface.co/docs/transformers/main/main_classes/deepspeed</span> <span class="hljs-comment"># keeping the 
same format as json for consistency, except it uses lower case for true/false</span> <span class="hljs-comment"># fmt: off</span> ds_config = { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-literal">False</span> }, <span class="hljs-string">&quot;bf16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-literal">False</span> }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: <span class="hljs-literal">True</span> }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-number">0.9</span> * model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-number">10</span> * model_hidden_size }, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: train_batch_size, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: <span class="hljs-literal">False</span> } <span class="hljs-comment"># fmt: on</span> <span class="hljs-comment"># next line instructs transformers to partition the model directly over multiple gpus using</span> <span class="hljs-comment"># deepspeed.zero.Init when model&#x27;s `from_pretrained` method is called.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># **it has to be run before loading the model AutoModelForSeq2SeqLM.from_pretrained(model_name)**</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># otherwise the model will first be loaded normally and only partitioned at forward time which is</span> <span class="hljs-comment"># less efficient and when there is little CPU RAM may fail</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> <span class="hljs-comment"># now a model can be loaded.</span> model = AutoModelForSeq2SeqLM.from_pretrained(model_name) <span class="hljs-comment"># initialise Deepspeed ZeRO and store only the engine object</span> ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[<span class="hljs-number">0</span>] ds_engine.module.<span class="hljs-built_in">eval</span>() <span class="hljs-comment"># inference</span> <span class="hljs-comment"># Deepspeed ZeRO can process unrelated inputs on each GPU. 
So for 2 gpus you process 2 inputs at once.</span> <span class="hljs-comment"># If you use more GPUs adjust for more.</span> <span class="hljs-comment"># And of course if you have just one input to process you then need to pass the same string to both gpus</span> <span class="hljs-comment"># If you use only one GPU, then you will have only rank 0.</span> rank = torch.distributed.get_rank() <span class="hljs-keyword">if</span> rank == <span class="hljs-number">0</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy&quot;</span> <span class="hljs-keyword">elif</span> rank == <span class="hljs-number">1</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? Review: this is the worst restaurant ever&quot;</span> tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer.encode(text_in, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).to(device=local_rank) <span class="hljs-keyword">with</span> torch.no_grad(): outputs = ds_engine.module.generate(inputs, synced_gpus=<span class="hljs-literal">True</span>) text_out = tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;rank<span class="hljs-subst">{rank}</span>:\n in=<span class="hljs-subst">{text_in}</span>\n out=<span class="hljs-subst">{text_out}</span>&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Let’s save it as <code>t0.py</code> and run it:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ deepspeed --num_gpus <span class="hljs-number">2</span> t0.py rank0: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the best cast iron skillet you will ever buy <span class="hljs-keyword">out</span>=Positive rank1: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? 
Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the worst restaurant ever <span class="hljs-keyword">out</span>=negative<!-- HTML_TAG_END --></pre></div> <p>This was a very basic example and you will want to adapt it to your needs.</p> <h2 class="relative group"><a id="main-deepspeed-resources" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#main-deepspeed-resources"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Main DeepSpeed Resources </span></h2> <ul><li><a href="https://github.com/microsoft/deepspeed" rel="nofollow">Project’s github</a></li> <li><a href="https://www.deepspeed.ai/getting-started/" rel="nofollow">Usage docs</a></li> <li><a href="https://deepspeed.readthedocs.io/en/latest/index.html" rel="nofollow">API docs</a></li> <li><a href="https://www.microsoft.com/en-us/research/search/?q=deepspeed" rel="nofollow">Blog posts</a></li></ul> <p>Papers:</p> <ul><li><a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO: Memory Optimizations Toward Training Trillion Parameter Models</a></li> <li><a href="https://arxiv.org/abs/2101.06840" rel="nofollow">ZeRO-Offload: Democratizing Billion-Scale Model Training</a></li> <li><a href="https://arxiv.org/abs/2104.07857" rel="nofollow">ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning</a></li></ul> <p>Finally, please, remember that, HuggingFace <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">DeepSpeed GitHub</a>.</p> <script type="module" data-hydrate="1qg5yeg"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1qg5yeg"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/deepspeed.mdx-hf-doc-builder.js") ], params: {} } }); </script>
55
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/output.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;model-outputs&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.utils.ModelOutput&quot;,&quot;title&quot;:&quot;ModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutput&quot;,&quot;title&quot;:&quot;BaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPoolingAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPast&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqModelOutput&quot;,&quot;title&quot;:&quot;Seq2SeqModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutput&quot;,&quot;title&quot;:&quot;CausalLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;CausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutputWithPast&quot;,&quot;title&quot;:&quot;CausalLMOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.MaskedLMOutput&quot;,&quot;title&quot;:&quot;MaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqLMOutput&quot;,&quot;title&quot;:&quot;Seq2SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.NextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;NextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.SequenceClassifierOutput&quot;,&quot;title&quot;:&quot;SequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;Seq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.MultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;MultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.TokenClassifierOutput&quot;,&quot;title&quot;:&quot;TokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.QuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;QuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;Seq2SeqQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.SemanticSegmenterOutput&quot;,&quot;title&quot;:&quot;SemanticSegmenterOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.ImageClassifierOutput&quot;,&quot;title&quot;:&quot;ImageClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.ImageClassifierOutputWithNoAttention&quot;,&quot;title&quot;:&quot;ImageClassifierOutputWithNoAttention&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Dept
hEstimatorOutput&quot;,&quot;title&quot;:&quot;DepthEstimatorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Wav2Vec2BaseModelOutput&quot;,&quot;title&quot;:&quot;Wav2Vec2BaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.XVectorOutput&quot;,&quot;title&quot;:&quot;XVectorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutput&quot;,&quot;title&quot;:&quot;TFBaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPoolingAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPast&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqModelOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutput&quot;,&quot;title&quot;:&quot;TFCausalLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;TFCausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutputWithPast&quot;,&quot;title&quot;:&quot;TFCausalLMOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFMaskedLMOutput&quot;,&quot;title&quot;:&quot;TFMaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqLMOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFNextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;TFNextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;TFSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;TFMultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFTokenClassifierOutput&quot;,&quot;title&quot;:&quot;TFTokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;TFQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutput&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.mod
eling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;FlaxCausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxMaskedLMOutput&quot;,&quot;title&quot;:&quot;FlaxMaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;FlaxNextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;FlaxMultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxTokenClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxTokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;FlaxQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqQuestionAnsweringModelOutput&quot;}],&quot;title&quot;:&quot;Model outputs&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/output.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="model-outputs" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#model-outputs"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Model outputs </span></h1> <p>All models have outputs that are instances of subclasses of <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a>. Those are data structures containing all the information returned by the model, but that can also be used as tuples or dictionaries.</p> <p>Let’s see how this looks in an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertTokenizer, BertForSequenceClassification <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> outputs = model(**inputs, labels=labels)<!-- HTML_TAG_END --></pre></div> <p>The <code>outputs</code> object is a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput">SequenceClassifierOutput</a>, as we can see in the documentation of that class below, it means it has an optional <code>loss</code>, a <code>logits</code> an optional <code>hidden_states</code> and an optional <code>attentions</code> attribute. 
Here we have the <code>loss</code> since we passed along <code>labels</code>, but we don’t have <code>hidden_states</code> and <code>attentions</code> because we didn’t pass <code>output_hidden_states=True</code> or <code>output_attentions=True</code>.</p> <p>You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get <code>None</code>. Here for instance <code>outputs.loss</code> is the loss computed by the model, and <code>outputs.attentions</code> is <code>None</code>.</p> <p>When considering our <code>outputs</code> object as tuple, it only considers the attributes that don’t have <code>None</code> values. Here for instance, it has two elements, <code>loss</code> then <code>logits</code>, so</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->outputs[:<span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div> <p>will return the tuple <code>(outputs.loss, outputs.logits)</code> for instance.</p> <p>When considering our <code>outputs</code> object as dictionary, it only considers the attributes that don’t have <code>None</code> values. Here for instance, it has two keys that are <code>loss</code> and <code>logits</code>.</p> <p>We document here the generic model outputs that are used by more than one model type. 
Specific output types are documented on their corresponding model page.</p> <h2 class="relative group"><a id="transformers.utils.ModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.ModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.ModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.utils.</span><span class="font-semibold">ModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.utils.ModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.ModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L148" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all model outputs as dataclass. Has a <code>__getitem__</code> that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the <code>None</code> attributes. Otherwise behaves like a regular python dictionary.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>You can’t unpack a <code>ModelOutput</code> directly. Use the <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput.to_tuple">to_tuple()</a> method to convert it to a tuple before.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.ModelOutput.to_tuple"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_tuple</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.ModelOutput.to_tuple" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.ModelOutput.to_tuple"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L237" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Convert self to a tuple containing all the attributes/keys that are not <code>None</code>.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.modeling_outputs.BaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the 
output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END 
--> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPooling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 
8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L162" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" 
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPoolingAndCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPoolingAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L195" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, 
hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when 
<code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPast </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.modeling_outputs.BaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L123" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (keys and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
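<p>The fields above behave like an ordinary Python dataclass, and every <code>ModelOutput</code> also supports dict-style access. As a minimal sketch (the tensor shapes below are made up purely for illustration), the container can be built and inspected directly:</p>
<pre><code class="language-python">import torch
from transformers.modeling_outputs import BaseModelOutputWithPast

# Made-up shapes for illustration: batch_size=2, sequence_length=5, hidden_size=16
hidden = torch.zeros(2, 5, 16)
out = BaseModelOutputWithPast(last_hidden_state=hidden)

print(out.last_hidden_state.shape)   # torch.Size([2, 5, 16])
print(out.past_key_values)           # None here; models only fill this when use_cache=True
print(out["last_hidden_state"] is out.last_hidden_state)  # dict-style access returns the same tensor
</code></pre>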
<h2>BaseModelOutputWithPastAndCrossAttentions</h2>
<h3><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPastAndCrossAttentions</span></h3>
<a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L244" target="_blank">&lt; source &gt;</a>
<p>( last_hidden_state: FloatTensor = None, past_key_values: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None, hidden_states: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, cross_attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None )</p>
<p>Parameters</p>
<ul>
<li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model. If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</li>
<li><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and, optionally if <code>config.is_encoder_decoder=True</code>, 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. Contains pre-computed hidden-states (key and values in the self-attention blocks and, optionally if <code>config.is_encoder_decoder=True</code>, in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</li>
<li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</li>
<li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
<li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li>
</ul>
<p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p>
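<p>As a further hedged illustration (nothing below is specific to any particular model), the dict-like view of a <code>ModelOutput</code> only exposes fields that are not <code>None</code>, so optional entries such as <code>cross_attentions</code> simply disappear when they were not requested:</p>
<pre><code class="language-python">import torch
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions

# Only last_hidden_state is set; the optional fields stay None (made-up shape).
out = BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=torch.zeros(1, 4, 8))

print(list(out.keys()))      # ['last_hidden_state'] -- None-valued fields are hidden from the dict view
print(len(out.to_tuple()))   # 1 -- to_tuple() likewise keeps only the populated fields
print(out.cross_attentions)  # None -- attribute access still works for the absent fields
</code></pre>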
<h2>Seq2SeqModelOutput</h2>
<h3><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqModelOutput</span></h3>
<a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L290" target="_blank">&lt; source &gt;</a>
<p>( last_hidden_state: FloatTensor = None, past_key_values: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None, decoder_hidden_states: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, decoder_attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, cross_attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, encoder_last_hidden_state: typing.Optional[torch.FloatTensor] = None, encoder_hidden_states: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, encoder_attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None )</p>
<p>Parameters</p>
<ul>
<li><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model. If <code>past_key_values</code> is used, only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.</li>
<li><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</li>
<li><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.</li>
<li><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
<li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the decoder’s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li>
<li><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.</li>
<li><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.</li>
<li><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
</ul>
<p>Base class for model encoder’s outputs that also contains: pre-computed hidden states that can speed up sequential decoding.</p>
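<p>On this page these outputs matter mostly for the Marian models: the bare <code>MarianModel</code> returns a <code>Seq2SeqModelOutput</code>. A minimal sketch follows (checkpoint and example sentence chosen arbitrarily; the decoder is primed with the configured start token):</p>
<pre><code class="language-python">import torch
from transformers import MarianModel, MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

inputs = tokenizer("I am a small frog.", return_tensors="pt")
# Marian uses the pad token as the decoder start token.
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids)

print(type(outputs).__name__)                   # Seq2SeqModelOutput
print(outputs.last_hidden_state.shape)          # decoder hidden states: (1, 1, hidden_size)
print(outputs.encoder_last_hidden_state.shape)  # encoder hidden states: (1, source_length, hidden_size)
</code></pre>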
<h2>CausalLMOutput</h2>
<h3><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutput</span></h3>
<a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L351" target="_blank">&lt; source &gt;</a>
<p>( loss: typing.Optional[torch.FloatTensor] = None, logits: FloatTensor = None, hidden_states: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None )</p>
<p>Parameters</p>
<ul>
<li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).</li>
<li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li>
<li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</li>
<li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
</ul>
<p>Base class for causal language model (or autoregressive) outputs.</p>
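<p>As a rough sketch of how <code>loss</code> and <code>logits</code> relate (all sizes below are invented purely for illustration), the fields can again be read straight off the output object:</p>
<pre><code class="language-python">import torch
from transformers.modeling_outputs import CausalLMOutput

batch_size, seq_len, vocab_size = 2, 6, 100  # made-up sizes
out = CausalLMOutput(
    loss=torch.tensor(1.23),  # present only when labels were passed to the model
    logits=torch.randn(batch_size, seq_len, vocab_size),
)

print(out.loss)           # scalar next-token prediction loss
print(out.logits.shape)   # (batch_size, sequence_length, vocab_size), pre-softmax scores
next_token_ids = out.logits[:, -1, :].argmax(dim=-1)  # greedy choice of the next token per batch item
</code></pre>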
<h2>CausalLMOutputWithCrossAttentions</h2>
<h3><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutputWithCrossAttentions</span></h3>
<a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L416" target="_blank">&lt; source &gt;</a>
<p>( loss: typing.Optional[torch.FloatTensor] = None, logits: FloatTensor = None, past_key_values: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None, hidden_states: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None, cross_attentions: typing.Optional[typing.Tuple[torch.FloatTensor]] = None )</p>
<p>Parameters</p>
<ul>
<li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).</li>
<li><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).</li>
<li><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.</li>
<li><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
<li><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.</li>
<li><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting.
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.CausalLMOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CausalLMOutputWithPast </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.CausalLMOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.CausalLMOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.CausalLMOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L380" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when 
<code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 
class="relative group"><a id="transformers.modeling_outputs.MaskedLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskedLMOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.MaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">MaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.MaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.MaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L496" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for masked language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqLMOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqLMOutput" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L525" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p 
class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.NextSentencePredictorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NextSentencePredictorOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.NextSentencePredictorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 
17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">NextSentencePredictorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.NextSentencePredictorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.NextSentencePredictorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L585" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) &#x2014; Next sequence prediction (classification) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of models predicting if two sentences are consecutive or not.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.SequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SequenceClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.SequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">SequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.SequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.SequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L615" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group 
flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sentence classification 
models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqSequenceClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 
0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L644" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MultipleChoiceModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.MultipleChoiceModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span 
class="font-semibold">MultipleChoiceModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.MultipleChoiceModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.MultipleChoiceModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L704" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used 
to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of multiple choice models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.TokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TokenClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.TokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">TokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.TokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.TokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L735" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_outputs.TokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QuestionAnsweringModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.QuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">QuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.QuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L764" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of 
<code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqQuestionAnsweringModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L796" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.SemanticSegmenterOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SemanticSegmenterOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SemanticSegmenterOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.SemanticSegmenterOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">SemanticSegmenterOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.SemanticSegmenterOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.SemanticSegmenterOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L859" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SemanticSegmenterOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SemanticSegmenterOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SemanticSegmenterOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SemanticSegmenterOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels, logits_height, logits_width)</code>) &#x2014; Classification scores for each pixel.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>The logits returned do not necessarily have the same size as the <code>pixel_values</code> passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SemanticSegmenterOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SemanticSegmenterOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, patch_size, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SemanticSegmenterOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SemanticSegmenterOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of semantic segmentation models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.ImageClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.ImageClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">ImageClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.ImageClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.ImageClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L897" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 
0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, 
if the model has an embedding layer, + one for the output of each stage) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states (also called feature maps) of the model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of image classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageClassifierOutputWithNoAttention </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention"><!-- HTML_TAG_START 
--><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">ImageClassifierOutputWithNoAttention</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L925" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded 
"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.ImageClassifierOutputWithNoAttention.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape <code>(batch_size, num_channels, height, width)</code>. Hidden-states (also called feature maps) of the model at the output of each stage.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of image classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.DepthEstimatorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.DepthEstimatorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DepthEstimatorOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.DepthEstimatorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 
8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">DepthEstimatorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.DepthEstimatorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.DepthEstimatorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L946" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predicted_depth<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.DepthEstimatorOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.DepthEstimatorOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.DepthEstimatorOutput.predicted_depth" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.DepthEstimatorOutput.predicted_depth"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predicted_depth</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, height, width)</code>) &#x2014; Predicted depth for each pixel.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.DepthEstimatorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.DepthEstimatorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed 
or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape <code>(batch_size, num_channels, height, width)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.DepthEstimatorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.DepthEstimatorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, patch_size, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of depth estimation models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Wav2Vec2BaseModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Wav2Vec2BaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L976" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">extract_features<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput.extract_features" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput.extract_features"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>extract_features</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, conv_dim[-1])</code>) &#x2014; Sequence of extracted feature vectors of the last convolutional layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Wav2Vec2BaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Wav2Vec2BaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for models that have been trained with the Wav2Vec2 loss objective.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.XVectorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XVectorOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.XVectorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">XVectorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.XVectorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.XVectorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_outputs.py#L1005" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">embeddings<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.XVectorOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.XVectorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) &#x2014; Classification hidden states before AMSoftmax.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.XVectorOutput.embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput.embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>embeddings</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.xvector_output_dim)</code>) &#x2014; Utterance embeddings used for vector similarity-based retrieval.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.XVectorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.XVectorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.XVectorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Output type of <a href="/docs/transformers/pr_19429/en/model_doc/wav2vec2#transformers.Wav2Vec2ForXVector">Wav2Vec2ForXVector</a>.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex 
space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.attentions" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPooling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; 
Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPoolingAndCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPoolingAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L125" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPast </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L173" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 
0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>TFBaseModelOutputWithPastAndCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPastAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L242" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 
8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
## TFSeq2SeqModelOutput

### class transformers.modeling_tf_outputs.TFSeq2SeqModelOutput

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L285)

`( last_hidden_state: tf.Tensor = None, past_key_values: Optional[List[tf.Tensor]] = None, decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None, decoder_attentions: Optional[Tuple[tf.Tensor]] = None, cross_attentions: Optional[Tuple[tf.Tensor]] = None, encoder_last_hidden_state: Optional[tf.Tensor] = None, encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None, encoder_attentions: Optional[Tuple[tf.Tensor]] = None )`

**Parameters**

- **last_hidden_state** (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used, only the last hidden-state of the sequences, of shape `(batch_size, 1, hidden_size)`, is output.
- **past_key_values** (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding.
- **decoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
- **decoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
- **cross_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
- **encoder_last_hidden_state** (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model.
- **encoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
- **encoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential decoding.
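Since this page documents Marian, a natural way to see `TFSeq2SeqModelOutput` in practice is to run the TF Marian model and look at the decoder-side and encoder-side fields. This is a minimal sketch; it assumes the `Helsinki-NLP/opus-mt-en-de` checkpoint (with TF weights available, otherwise add `from_pt=True`) and simply reuses the source ids as `decoder_input_ids` for illustration.

```python
from transformers import MarianTokenizer, TFMarianModel

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

inputs = tokenizer("I am a small frog.", return_tensors="tf")
outputs = model(
    input_ids=inputs["input_ids"],
    decoder_input_ids=inputs["input_ids"],  # illustration only; normally the shifted target ids
    output_hidden_states=True,
)

print(outputs.last_hidden_state.shape)          # decoder output: (batch_size, tgt_len, hidden_size)
print(outputs.encoder_last_hidden_state.shape)  # encoder output: (batch_size, src_len, hidden_size)
print(len(outputs.decoder_hidden_states))       # embeddings output + one per decoder layer
print(len(outputs.encoder_hidden_states))       # embeddings output + one per encoder layer
```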
## TFCausalLMOutput

### class transformers.modeling_tf_outputs.TFCausalLMOutput

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L345)

`( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, hidden_states: Optional[Tuple[tf.Tensor]] = None, attentions: Optional[Tuple[tf.Tensor]] = None )`

**Parameters**

- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction).
- **logits** (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for causal language model (or autoregressive) outputs.
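A quick way to obtain the `loss` and `logits` fields of a causal LM output is to pass `labels` along with the inputs. A minimal sketch, again assuming the `gpt2` checkpoint; the concrete class returned by a given model may be one of the richer variants documented below, but these two fields behave the same way.

```python
from transformers import AutoTokenizer, TFGPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFGPT2LMHeadModel.from_pretrained("gpt2")

inputs = tokenizer("Studies have shown that owning a dog is good for you", return_tensors="tf")
# Passing labels makes the model compute the next-token prediction loss.
outputs = model(**inputs, labels=inputs["input_ids"])

print(outputs.loss.shape)    # (n,), one value per non-masked label
print(outputs.logits.shape)  # (batch_size, sequence_length, config.vocab_size)
```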
## TFCausalLMOutputWithCrossAttentions

### class transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L410)

`( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, past_key_values: Optional[List[tf.Tensor]] = None, hidden_states: Optional[Tuple[tf.Tensor]] = None, attentions: Optional[Tuple[tf.Tensor]] = None, cross_attentions: Optional[Tuple[tf.Tensor]] = None )`

**Parameters**

- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss (for next-token prediction).
- **logits** (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
- **cross_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
- **past_key_values** (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

Base class for causal language model (or autoregressive) outputs.
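All of the optional fields above default to `None` unless explicitly requested, and `cross_attentions` is only filled when the decoder actually attends to encoder hidden states. A small sketch under the same `gpt2` assumption as above (a plain decoder-only model, so no encoder states are passed):

```python
from transformers import AutoTokenizer, TFGPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFGPT2LMHeadModel.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="tf")

plain = model(**inputs)
rich = model(**inputs, use_cache=True, output_attentions=True, output_hidden_states=True)

print(plain.attentions, plain.hidden_states)          # None, None: not requested
print(len(rich.attentions), len(rich.hidden_states))  # one per layer / embeddings + layers
print(rich.cross_attentions)                          # expected None: no encoder states were attended to
```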
17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFCausalLMOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L374" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.loss" class="header-link block pr-0.5 text-lg no-hover:hidden 
- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided) -- Language modeling loss (for next-token prediction).
- **logits** (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- **past_key_values** (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`) -- List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for causal language model (or autoregressive) outputs.
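The output classes in `transformers.modeling_tf_outputs` are `ModelOutput` dataclasses, so the fields documented above can be read by attribute or by key. A minimal sketch with hypothetical placeholder tensors (no real model forward pass), just to illustrate the layout:

```python
import tensorflow as tf
from transformers.modeling_tf_outputs import TFCausalLMOutputWithPast

# Hypothetical logits: (batch_size=2, sequence_length=7, vocab_size=50257).
logits = tf.zeros((2, 7, 50257))
out = TFCausalLMOutputWithPast(logits=logits)

print(out.logits.shape)      # (2, 7, 50257)
print(out.past_key_values)   # None here; models fill it when called with use_cache=True
print(out["logits"].shape)   # the same field, accessed dict-style
```

When a TF causal LM does populate `past_key_values`, feeding it back in as the `past_key_values` input on the next step lets decoding proceed one token at a time without recomputing the attention states for the whole prefix.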
## TFMaskedLMOutput

class transformers.modeling_tf_outputs.TFMaskedLMOutput( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, hidden_states: Optional[Tuple[tf.Tensor]] = None, attentions: Optional[Tuple[tf.Tensor]] = None )

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L453)

Parameters:

- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided) -- Masked language modeling (MLM) loss.
- **logits** (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for masked language models outputs.
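A TF masked language model returns this class from its forward call. A short sketch, assuming the `bert-base-uncased` checkpoint (any TF MLM checkpoint would do) and a made-up example sentence:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = TFBertForMaskedLM.from_pretrained("bert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(**inputs)  # a TFMaskedLMOutput

# Pick the highest-scoring vocabulary token at the [MASK] position from `logits`.
mask_pos = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])
predicted_id = int(tf.argmax(outputs.logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))  # expected to be something like "paris"
```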
## TFSeq2SeqLMOutput

class transformers.modeling_tf_outputs.TFSeq2SeqLMOutput( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, past_key_values: Optional[List[tf.Tensor]] = None, decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None, decoder_attentions: Optional[Tuple[tf.Tensor]] = None, cross_attentions: Optional[Tuple[tf.Tensor]] = None, encoder_last_hidden_state: Optional[tf.Tensor] = None, encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None, encoder_attentions: Optional[Tuple[tf.Tensor]] = None )

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L482)

Parameters:

- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided) -- Language modeling loss.
- **logits** (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- **past_key_values** (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`) -- List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding.
- **decoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
- **decoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
- **cross_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
- **encoder_last_hidden_state** (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) -- Sequence of hidden-states at the output of the last layer of the encoder of the model.
- **encoder_hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
- **encoder_attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for sequence-to-sequence language models outputs.
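This is the output type returned by TF encoder-decoder models such as the Marian MT model covered on this page. A short sketch, assuming the `Helsinki-NLP/opus-mt-en-de` checkpoint and made-up source/target sentences:

```python
from transformers import MarianTokenizer, TFMarianMTModel

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

batch = tokenizer(["Machine translation is useful."], return_tensors="tf", padding=True)
labels = tokenizer(text_target=["Maschinelle Übersetzung ist nützlich."], return_tensors="tf", padding=True).input_ids

outputs = model(**batch, labels=labels)  # a TFSeq2SeqLMOutput

print(outputs.loss.shape)                       # per-token loss of shape (n,), as documented above
print(outputs.logits.shape)                     # (batch_size, target_length, vocab_size)
print(outputs.encoder_last_hidden_state.shape)  # (batch_size, source_length, hidden_size)
```

The `past_key_values` field plays the same role as for the causal LM output above: during generation the decoder's key/value states are cached and reused rather than recomputed at every step.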
## TFNextSentencePredictorOutput

class transformers.modeling_tf_outputs.TFNextSentencePredictorOutput( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, hidden_states: Optional[Tuple[tf.Tensor]] = None, attentions: Optional[Tuple[tf.Tensor]] = None )

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L541)

Parameters:

- **loss** (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `next_sentence_label` is provided) -- Next sentence prediction loss.
- **logits** (`tf.Tensor` of shape `(batch_size, 2)`) -- Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for outputs of models predicting if two sentences are consecutive or not.
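A short sketch of how the `(batch_size, 2)` logits are typically used, assuming the `bert-base-uncased` checkpoint and made-up sentences; by BERT's convention, index 0 scores "sentence B follows sentence A" and index 1 scores "sentence B is unrelated":

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFBertForNextSentencePrediction

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = TFBertForNextSentencePrediction.from_pretrained("bert-base-uncased")

encoding = tokenizer("The sky darkened quickly.", "Soon it began to rain.", return_tensors="tf")
outputs = model(**encoding)  # a TFNextSentencePredictorOutput

probs = tf.nn.softmax(outputs.logits, axis=-1)
print(float(probs[0, 0]))  # probability that the second sentence is a true continuation
```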
## TFSequenceClassifierOutput

class transformers.modeling_tf_outputs.TFSequenceClassifierOutput( loss: Optional[tf.Tensor] = None, logits: tf.Tensor = None, hidden_states: Optional[Tuple[tf.Tensor]] = None, attentions: Optional[Tuple[tf.Tensor]] = None )

(source: https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L571)

Parameters:

- **loss** (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `labels` is provided) -- Classification (or regression if config.num_labels==1) loss.
- **logits** (`tf.Tensor` of shape `(batch_size, config.num_labels)`) -- Classification (or regression if config.num_labels==1) scores (before SoftMax).
- **hidden_states** (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- **attentions** (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

Base class for outputs of sentence classification models.
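A short sketch, assuming the `distilbert-base-uncased-finetuned-sst-2-english` sentiment checkpoint and a made-up input sentence; the predicted class is read off the `logits` field documented above:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

name = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(name)
model = TFAutoModelForSequenceClassification.from_pretrained(name)

inputs = tokenizer("A thoroughly enjoyable read.", return_tensors="tf")
outputs = model(**inputs)  # a TFSequenceClassifierOutput

pred = int(tf.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[pred])  # e.g. "POSITIVE"
```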
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L600" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: 
typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder 
at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence 
classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMultipleChoiceModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFMultipleChoiceModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L747" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of multiple choice models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFTokenClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFTokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFTokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L778" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFQuestionAnsweringModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L807" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: 
typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before 
SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFSeq2SeqQuestionAnsweringModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 
7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_outputs.py#L839" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> 
<p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of 
shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the 
initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 
dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L23" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.attentions"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 
1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutputWithPast </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 
8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L49" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Dict[str, jax._src.numpy.ndarray.ndarray], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, jnp.ndarray]</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average 
in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new 
values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutputWithPooling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L79" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.attentions"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutputWithPastAndCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPastAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if 
<code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L205" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span 
class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.ndarray.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_attentions"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxCausalLMOutputWithCrossAttentions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxCausalLMOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L266" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.logits" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.replace" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMaskedLMOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 
.7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxMaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L307" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for masked language models outputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqLMOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L336" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.ndarray.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxNextSentencePredictorOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxNextSentencePredictorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L393" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of models predicting if two sentences are consecutive or not.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 
26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSequenceClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L420" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the 
output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sentence classification models.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqSequenceClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: 
typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.ndarray.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
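<p>As a quick illustration (not part of the original docstring), the container can also be instantiated directly; in practice it would be returned by a Flax sequence-to-sequence classification head. The shapes below are arbitrary sketch values.</p>
<pre><code class="language-python">
# Sketch: building the output container by hand just to show its fields.
# Real values would come from a Flax seq2seq classification model.
import jax.numpy as jnp
from transformers.modeling_flax_outputs import FlaxSeq2SeqSequenceClassifierOutput

batch_size, num_labels, src_len, hidden = 2, 3, 8, 16
output = FlaxSeq2SeqSequenceClassifierOutput(
    logits=jnp.zeros((batch_size, num_labels)),
    encoder_last_hidden_state=jnp.zeros((batch_size, src_len, hidden)),
)

print(output.logits.shape)                     # (2, 3)
print(output.encoder_last_hidden_state.shape)  # (2, 8, 16)
print(output.decoder_attentions)               # None unless attentions were requested
</code></pre>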
<h2>FlaxMultipleChoiceModelOutput</h2>

<h3>class transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput</h3>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L503">&lt; source &gt;</a></p>
<p><code>( logits: ndarray = None, hidden_states: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None, attentions: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None )</code></p>
<p>Base class for outputs of multiple choice models.</p>
<p>Parameters</p>
<ul>
<li><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>): Classification scores (before SoftMax). <em>num_choices</em> is the second dimension of the input tensors (see <em>input_ids</em> above).</li>
<li><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>): Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the model at the output of each layer plus the initial embedding outputs.</li>
<li><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>): Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
</ul>

<h4>replace</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108">&lt; source &gt;</a></p>
<p><code>( **updates )</code></p>
<p>Returns a new object replacing the specified fields with new values.</p>
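<p>The sketch below is an illustrative addition showing how the <code>(batch_size, num_choices)</code> logits shape arises from the way multiple-choice inputs are laid out; the checkpoint and the <code>FlaxBertForMultipleChoice</code> class are assumptions made for the example only.</p>
<pre><code class="language-python">
# Minimal sketch of a multiple-choice forward pass that returns a
# FlaxMultipleChoiceModelOutput. Checkpoint and model class are assumptions.
from transformers import AutoTokenizer, FlaxBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxBertForMultipleChoice.from_pretrained("bert-base-uncased")

prompt = "The sky is"
choice0 = "blue on a clear day."
choice1 = "made of cheese."

# Pair the prompt with each choice, then add a leading batch dimension:
# the model expects input_ids of shape (batch_size, num_choices, sequence_length).
encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="np", padding=True)
inputs = {k: v[None, :] for k, v in encoding.items()}

outputs = model(**inputs)
print(outputs.logits.shape)  # (batch_size, num_choices) -> (1, 2)
</code></pre>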
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxTokenClassifierOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxTokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L531" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.replace"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxQuestionAnsweringModelOutput </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L557" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
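<p>As a quick illustration (not part of the upstream docstring), the sketch below shows how the fields above are typically read from a Flax question-answering model. The choice of <code>FlaxBertForQuestionAnswering</code> and the <code>bert-base-uncased</code> checkpoint are assumptions made for the example; with that checkpoint the QA head is freshly initialized, so the logits only illustrate shapes and field access, not meaningful answers.</p>
<pre>
# Illustrative only: the checkpoint and model class are assumptions for this sketch.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxBertForQuestionAnswering.from_pretrained("bert-base-uncased")

question = "What is extractive QA?"
context = "Extractive QA selects an answer span from the context."
inputs = tokenizer(question, context, return_tensors="np")

# The forward pass returns a FlaxQuestionAnsweringModelOutput.
outputs = model(**inputs, output_hidden_states=True)

start_index = int(jnp.argmax(outputs.start_logits[0]))  # most likely span start
end_index = int(jnp.argmax(outputs.end_logits[0]))      # most likely span end
print(len(outputs.hidden_states))                       # embeddings + one entry per layer
</pre>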
<h4 id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.replace">replace</h4> <a class="source-link" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank">&lt; source &gt;</a>
<p class="signature">( **updates )</p>
<p>Returns a new object replacing the specified fields with new values.</p>
<h2 id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput">FlaxSeq2SeqQuestionAnsweringModelOutput</h2>
<h3><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqQuestionAnsweringModelOutput</span></h3> <a class="source-link" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_flax_outputs.py#L586" target="_blank">&lt; source &gt;</a>
<p class="signature">( start_logits: ndarray = None end_logits: ndarray = None past_key_values: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.ndarray.ndarray]]] = None decoder_hidden_states: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None decoder_attentions: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None cross_attentions: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None encoder_last_hidden_state: typing.Optional[jax._src.numpy.ndarray.ndarray] = None encoder_hidden_states: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None encoder_attentions: typing.Optional[typing.Tuple[jax._src.numpy.ndarray.ndarray]] = None )</p>
<p>Parameters</p>
<ul>
<li><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).</li>
<li><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).</li>
<li><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code> and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.</li>
<li><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.</li>
<li><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
<li><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attention weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.</li>
<li><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.</li>
<li><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.</li>
<li><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.</li>
</ul>
<p>Base class for outputs of sequence-to-sequence question answering models.</p>
<h4 id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.replace">replace</h4> <a class="source-link" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank">&lt; source &gt;</a>
<p class="signature">( **updates )</p>
<p>Returns a new object replacing the specified fields with new values.</p>
56
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/logging.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;logging&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.utils.logging.set_verbosity_error&quot;,&quot;title&quot;:&quot;Base setters&quot;},{&quot;local&quot;:&quot;transformers.utils.logging.get_verbosity&quot;,&quot;title&quot;:&quot;Other functions&quot;}],&quot;title&quot;:&quot;Logging&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/logging.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="logging" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#logging"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Logging </span></h1> <p>🤗 Transformers has a centralized logging system, so that you can setup the verbosity of the library easily.</p> <p>Currently the default verbosity of the library is <code>WARNING</code>.</p> <p>To change the level of verbosity, just use one of the direct setters. 
For instance, here is how to change the verbosity to the INFO level.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> transformers transformers.logging.set_verbosity_info()<!-- HTML_TAG_END --></pre></div> <p>You can also use the environment variable <code>TRANSFORMERS_VERBOSITY</code> to override the default verbosity. You can set it to one of the following: <code>debug</code>, <code>info</code>, <code>warning</code>, <code>error</code>, <code>critical</code>. For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TRANSFORMERS_VERBOSITY=error ./myprogram.py<!-- HTML_TAG_END --></pre></div> <p>Additionally, some <code>warnings</code> can be disabled by setting the environment variable <code>TRANSFORMERS_NO_ADVISORY_WARNINGS</code> to a true value, like <em>1</em>. This will disable any warning that is logged using <code>logger.warning_advice()</code>. 
For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py<!-- HTML_TAG_END --></pre></div> <p>Here is an example of how to use the same logger as the library in your own module or script:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.utils <span class="hljs-keyword">import</span> logging logging.set_verbosity_info() logger = logging.get_logger(<span class="hljs-string">&quot;transformers&quot;</span>) logger.info(<span class="hljs-string">&quot;INFO&quot;</span>) logger.warning(<span class="hljs-string">&quot;WARN&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>All the methods of this logging module are documented below, the main ones are <a href="/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.get_verbosity">logging.get_verbosity()</a> to get the current level of verbosity in the logger and <a href="/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.set_verbosity">logging.set_verbosity()</a> to set the verbosity to the level of your choice. 
In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parentheses) are:</p>
<ul>
<li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code> (int value, 50): only reports the most critical errors.</li>
<li><code>transformers.logging.ERROR</code> (int value, 40): only reports errors.</li>
<li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code> (int value, 30): only reports errors and warnings. This is the default level used by the library.</li>
<li><code>transformers.logging.INFO</code> (int value, 20): reports errors, warnings, and basic information.</li>
<li><code>transformers.logging.DEBUG</code> (int value, 10): reports all information.</li>
</ul>
<p>By default, <code>tqdm</code> progress bars will be displayed during model download. <a href="/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.disable_progress_bar">logging.disable_progress_bar()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/logging#transformers.utils.logging.enable_progress_bar">logging.enable_progress_bar()</a> can be used to disable or re-enable this behavior.</p>
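<p>As a usage sketch (an addition for illustration, relying only on the functions and constants named on this page), the snippet below ties the integer levels above to <code>get_verbosity()</code> / <code>set_verbosity()</code> and shows the progress-bar helpers:</p>
<pre>
from transformers.utils import logging

# get_verbosity() returns one of the integer levels listed above (30, i.e. WARNING, by default).
current = logging.get_verbosity()
print(current)

# Temporarily switch to INFO, then restore whatever level was active before.
logging.set_verbosity(logging.INFO)
# ... run something chatty ...
logging.set_verbosity(current)

# Hide tqdm download bars (e.g. in CI logs) and turn them back on afterwards.
logging.disable_progress_bar()
logging.enable_progress_bar()
</pre>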
<h2 id="transformers.utils.logging.set_verbosity_error">Base setters</h2>
<h4>transformers.utils.logging.set_verbosity_error</h4> <a class="source-link" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L186" target="_blank">&lt; source &gt;</a>
<p class="signature">( )</p>
<p>Set the verbosity to the <code>ERROR</code> level.</p>
<h4 id="transformers.utils.logging.set_verbosity_warning">transformers.utils.logging.set_verbosity_warning</h4> <a class="source-link" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L176" target="_blank">&lt; source &gt;</a>
<p class="signature">( )</p>
<p>Set the verbosity to the <code>WARNING</code> level.</p>
<h4 id="transformers.utils.logging.set_verbosity_info">transformers.utils.logging.set_verbosity_info</h4> <a class="header-link" href="#transformers.utils.logging.set_verbosity_info"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L171" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>INFO</code> level.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity_debug"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity_debug</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity_debug" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity_debug"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 
8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L181" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>DEBUG</code> level.</p></div> <h2 class="relative group"><a id="transformers.utils.logging.get_verbosity" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.logging.get_verbosity"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Other functions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.get_verbosity"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
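<p>For instance, switching to the <code>ERROR</code> level keeps model loading quiet; a minimal sketch (the Marian checkpoint is only an illustrative choice):</p>
<pre><code class="language-python">
from transformers import AutoModel, logging

logging.set_verbosity_error()  # only ERROR and CRITICAL messages are emitted
model = AutoModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")  # loads without warnings

logging.set_verbosity_info()  # INFO messages (downloads, weight loading, ...) show again
</code></pre>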
fill="currentColor"/></svg>transformers.utils.logging.get_verbosity</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.get_verbosity" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.get_verbosity"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L129" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.utils.logging.get_verbosity.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The logging level.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Return the current level for the 🤗 Transformers’s root logger as an int.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>🤗 Transformers has following logging levels:</p> <ul><li>50: <code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li>40: <code>transformers.logging.ERROR</code></li> <li>30: <code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li>20: <code>transformers.logging.INFO</code></li> <li>10: <code>transformers.logging.DEBUG</code></li></ul></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 
16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L152" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbosity<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.logging.set_verbosity.verbosity" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.logging.set_verbosity.verbosity"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbosity</strong> (<code>int</code>) &#x2014; Logging level, e.g., one of:</p> <ul> <li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li><code>transformers.logging.ERROR</code></li> <li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li><code>transformers.logging.INFO</code></li> <li><code>transformers.logging.DEBUG</code></li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set the verbosity level for the 🤗 Transformers’s root logger.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.get_logger"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.get_logger</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.get_logger" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.get_logger"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
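<p>A small sketch of saving and restoring the verbosity with these two functions, assuming only the level constants listed above:</p>
<pre><code class="language-python">
from transformers import logging

previous_level = logging.get_verbosity()  # an int, e.g. 30 == logging.WARNING by default
logging.set_verbosity(logging.DEBUG)      # 10: the most verbose setting

# ... run something noisy while DEBUG logging is active ...

logging.set_verbosity(previous_level)     # restore the level that was in place before
</code></pre>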
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Return a logger with the specified name.</p> <p>This function is not supposed to be directly accessed unless you are writing a custom transformers module.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_default_handler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_default_handler</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_default_handler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.enable_default_handler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L200" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable the default handler of the HuggingFace Transformers’s root logger.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.disable_default_handler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.disable_default_handler</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.disable_default_handler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.disable_default_handler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L191" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 
relative docstring-details "> </div></div> <p>Disable the default handler of the HuggingFace Transformers’s root logger.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_explicit_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_explicit_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_explicit_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.enable_explicit_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L246" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="relative group rounded-md"><a id="transformers.utils.logging.enable_explicit_format.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.utils.logging.enable_explicit_format.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Enable explicit formatting for every HuggingFace Transformers’s logger. The explicit formatter is as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --> [LEVELNAME|<span class="hljs-type">FILENAME</span>|<span class="hljs-type">LINE</span> NUMBER] TIME &gt;&gt; MESSAGE<!-- HTML_TAG_END --></pre></div></div> All handlers currently bound to the root logger are affected by this method. 
</div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.reset_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.reset_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.reset_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.reset_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the formatting for HuggingFace Transformers’s loggers.</p> <p>All handlers currently bound to the root logger are affected by this method.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_progress_bar"><!-- HTML_TAG_START --><h4 class="!m-0"><span 
class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_progress_bar</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_progress_bar" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.enable_progress_bar"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L337" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable tqdm progress bar.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.disable_progress_bar"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 
17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.disable_progress_bar</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.disable_progress_bar" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.disable_progress_bar"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/logging.py#L344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Disable tqdm progress bar.</p></div> <script type="module" data-hydrate="1bhu95a"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1bhu95a"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/main_classes/logging.mdx-hf-doc-builder.js") ], params: {} } }); </script>
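<p>For example, to keep tqdm output out of captured logs (a minimal sketch):</p>
<pre><code class="language-python">
from transformers import logging

logging.disable_progress_bar()  # suppress tqdm bars emitted by 🤗 Transformers
# ... run code that would otherwise draw progress bars ...
logging.enable_progress_bar()   # restore the default behaviour
</code></pre>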
57
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/main_classes/onnx.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;exporting-transformers-models-to-onnx&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;onnx-configurations&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.onnx.OnnxConfig&quot;,&quot;title&quot;:&quot;OnnxConfig&quot;},{&quot;local&quot;:&quot;transformers.onnx.OnnxConfigWithPast&quot;,&quot;title&quot;:&quot;OnnxConfigWithPast&quot;},{&quot;local&quot;:&quot;transformers.onnx.OnnxSeq2SeqConfigWithPast&quot;,&quot;title&quot;:&quot;OnnxSeq2SeqConfigWithPast&quot;}],&quot;title&quot;:&quot;ONNX Configurations&quot;},{&quot;local&quot;:&quot;onnx-features&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.onnx.FeaturesManager&quot;,&quot;title&quot;:&quot;FeaturesManager&quot;}],&quot;title&quot;:&quot;ONNX Features&quot;}],&quot;title&quot;:&quot;Exporting 🤗 Transformers models to ONNX&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/main_classes/onnx.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="exporting-transformers-models-to-onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-transformers-models-to-onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting 🤗 Transformers models to ONNX </span></h1> <p>🤗 Transformers provides a <code>transformers.onnx</code> package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects.</p> <p>See the <a href="../serialization">guide</a> on exporting 🤗 Transformers models for more details.</p> <h2 class="relative group"><a id="onnx-configurations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx-configurations"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX Configurations </span></h2> <p>We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:</p> <ul><li>Encoder-based models inherit from <a href="/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxConfig">OnnxConfig</a></li> <li>Decoder-based models inherit from <a href="/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxConfigWithPast">OnnxConfigWithPast</a></li> <li>Encoder-decoder models inherit from <a href="/docs/transformers/pr_19429/en/main_classes/onnx#transformers.onnx.OnnxSeq2SeqConfigWithPast">OnnxSeq2SeqConfigWithPast</a></li></ul> <h3 class="relative group"><a id="transformers.onnx.OnnxConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxConfig </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 
12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig.flatten_output_collection_property"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 
12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>flatten_output_collection_property</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.flatten_output_collection_property" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.flatten_output_collection_property"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L362" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">field<span class="opacity-60">: typing.Iterable[typing.Any]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>(Dict[str, Any])</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.onnx.OnnxConfig.flatten_output_collection_property.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>(Dict[str, Any])</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Outputs with flattened structure and key mapping this new structure.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Flatten any potential nested structure expanding the name of the field with the index of the element within the structure.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.onnx.OnnxConfig.from_model_config"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_model_config</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.from_model_config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.from_model_config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L124" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Instantiate a OnnxConfig for a specific model</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.onnx.OnnxConfig.generate_dummy_inputs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate_dummy_inputs</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.generate_dummy_inputs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L264" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">preprocessor<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedTokenizerBase&#39;), ForwardRef(&#39;FeatureExtractionMixin&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seq_length<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">num_choices<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_pair<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[transformers.utils.generic.TensorType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_channels<span class="opacity-60">: int = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_width<span class="opacity-60">: int = 40</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_height<span class="opacity-60">: int = 40</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The batch size to export the model for (-1 means dynamic axis).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.num_choices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.num_choices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_choices</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The number of candidate answers provided for multiple choice task (-1 means dynamic axis).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.seq_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.seq_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seq_length</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The sequence length to export the model for (-1 means dynamic axis).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.is_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.is_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>is_pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Indicate if the input is a pair (sentence 1, sentence 2)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>TensorType</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of channels of the generated images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.image_width" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.image_width"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_width</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The width of the generated images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.image_height" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.image_height"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_height</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The height of the generated images.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generate inputs to provide to the ONNX exporter for the specific framework</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig.use_external_data_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 
11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>use_external_data_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.use_external_data_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.use_external_data_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L238" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_parameters<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Flag indicating if the model requires using external data format</p></div></div> <h3 class="relative group"><a id="transformers.onnx.OnnxConfigWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfigWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxConfigWithPast </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.onnx.OnnxConfigWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxConfigWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L381" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_past<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>fill_with_past_key_values_</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L489" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_or_outputs<span class="opacity-60">: typing.Mapping[str, typing.Mapping[int, str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">direction<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Fill the input_or_outputs mapping with 
past_key_values dynamic axes considering.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfigWithPast.with_past"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>with_past</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast.with_past" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast.with_past"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L392" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Instantiate a OnnxConfig with 
<code>use_past</code> attribute set to True</p></div></div> <h3 class="relative group"><a id="transformers.onnx.OnnxSeq2SeqConfigWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxSeq2SeqConfigWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxSeq2SeqConfigWithPast </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxSeq2SeqConfigWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxSeq2SeqConfigWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxSeq2SeqConfigWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxSeq2SeqConfigWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/config.py#L522" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_past<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <h2 class="relative group"><a id="onnx-features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx-features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX Features </span></h2> <p>Each ONNX configuration is associated with a set of <em>features</em> that enable you to export models for different types of topologies or tasks.</p> <h3 class="relative group"><a id="transformers.onnx.FeaturesManager" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeaturesManager </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">FeaturesManager</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L83" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.check_supported_model_or_raise"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>check_supported_model_or_raise</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.check_supported_model_or_raise" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.check_supported_model_or_raise"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L670" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Check whether or not the model has the requested features.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.determine_framework"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 
text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>determine_framework</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.determine_framework" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.determine_framework"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L587" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: str = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.determine_framework.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.onnx.FeaturesManager.determine_framework.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code>) &#x2014; The name of the model to export.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.determine_framework.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.determine_framework.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The framework to use for the export. 
See above for priority if none provided.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Determines the framework to use for the export.</p> <p>The priority is in the following order:</p> <ol><li>User input via <code>framework</code>.</li> <li>If local checkpoint is provided, use the same framework as the checkpoint.</li> <li>Available framework in environment, with priority given to PyTorch</li></ol></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.get_config"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_config</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.get_config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.get_config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L695" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_type<span 
<h4 id="transformers.onnx.FeaturesManager.get_config">get_config</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L695" target="_blank">&lt; source &gt;</a></p>
<p><code>( model_type: str, feature: str ) → OnnxConfig</code></p>
<p>Parameters</p>
<ul><li><strong>model_type</strong> (<code>str</code>) — The model type to retrieve the config for.</li>
<li><strong>feature</strong> (<code>str</code>) — The feature to retrieve the config for.</li></ul>
<p>Returns: <code>OnnxConfig</code> — the config for the combination.</p>
<p>Gets the OnnxConfig for a model_type and feature combination.</p>
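<p>A hedged sketch of how this can be used. The <code>"distilbert"</code>/<code>"default"</code> pair is illustrative, and the returned object is treated here as a config constructor that is called with the model config, mirroring how the export utilities use it:</p>
<pre># Illustrative model_type/feature pair; any supported combination works the same way.
from transformers import AutoConfig
from transformers.onnx import FeaturesManager

model_config = AutoConfig.from_pretrained("distilbert-base-uncased")
onnx_config_constructor = FeaturesManager.get_config("distilbert", "default")
onnx_config = onnx_config_constructor(model_config)  # build the OnnxConfig for this model
print(onnx_config.inputs)</pre>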
<h4 id="transformers.onnx.FeaturesManager.get_model_class_for_feature">get_model_class_for_feature</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L561" target="_blank">&lt; source &gt;</a></p>
<p><code>( feature: str, framework: str = 'pt' )</code></p>
<p>Parameters</p>
<ul><li><strong>feature</strong> (<code>str</code>) — The feature required.</li>
<li><strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pt"</code>) — The framework to use for the export.</li></ul>
<p>Attempts to retrieve an AutoModel class from a feature name.</p>
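<p>For example (a sketch; <code>"sequence-classification"</code> is an illustrative feature name):</p>
<pre>from transformers.onnx import FeaturesManager

# Maps a feature name to the AutoModel class used to load checkpoints for it.
model_class = FeaturesManager.get_model_class_for_feature("sequence-classification")
print(model_class)  # e.g. AutoModelForSequenceClassification with the default framework="pt"</pre>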
<h4 id="transformers.onnx.FeaturesManager.get_model_from_feature">get_model_from_feature</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L637" target="_blank">&lt; source &gt;</a></p>
<p><code>( feature: str, model: str, framework: str = None, cache_dir: str = None )</code></p>
<p>Parameters</p>
<ul><li><strong>feature</strong> (<code>str</code>) — The feature required.</li>
<li><strong>model</strong> (<code>str</code>) — The name of the model to export.</li>
<li><strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) — The framework to use for the export. See <code>FeaturesManager.determine_framework</code> for the priority should none be provided.</li></ul>
<p>Attempts to retrieve a model from a model’s name and the feature to be enabled.</p>
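<p>A minimal sketch, assuming an internet connection or a cached checkpoint (the feature and checkpoint names are illustrative):</p>
<pre>from transformers.onnx import FeaturesManager

# Loads the checkpoint with the AutoModel class matching the requested feature.
model = FeaturesManager.get_model_from_feature("sequence-classification", "distilbert-base-uncased")
print(type(model).__name__)</pre>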
<h4 id="transformers.onnx.FeaturesManager.get_supported_features_for_model_type">get_supported_features_for_model_type</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/onnx/features.py#L516" target="_blank">&lt; source &gt;</a></p>
<p><code>( model_type: str, model_name: typing.Optional[str] = None )</code></p>
<p>Parameters</p>
<ul><li><strong>model_type</strong> (<code>str</code>) — The model type to retrieve the supported features for.</li>
<li><strong>model_name</strong> (<code>str</code>, <em>optional</em>) — The name attribute of the model object, only used for the exception message.</li></ul>
<p>Tries to retrieve the feature → OnnxConfig constructor map from the model type.</p>
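<p>A quick sketch (<code>"bert"</code> is an illustrative model type; the exact feature names depend on the installed version):</p>
<pre>from transformers.onnx import FeaturesManager

# The returned mapping goes from feature name to the OnnxConfig constructor for that feature.
supported = FeaturesManager.get_supported_features_for_model_type("bert")
print(sorted(supported.keys()))  # e.g. ["causal-lm", "default", "masked-lm", "sequence-classification", ...]</pre>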
58
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/language_modeling.html
<h1 id="language-modeling">Language modeling</h1>
<p>Language modeling predicts words in a sentence. There are two forms of language modeling.</p>
<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/Vpjb1lu0MDk" title="YouTube video player" allowfullscreen></iframe>
<p>Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left.</p>
<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/mqElG5QJWUg" title="YouTube video player" allowfullscreen></iframe>
<p>Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally.</p>
<p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilgpt2" rel="nofollow">DistilGPT2</a> for causal language modeling and <a href="https://huggingface.co/distilroberta-base" rel="nofollow">DistilRoBERTa</a> for masked language modeling on the <a href="https://www.reddit.com/r/askscience/" rel="nofollow">r/askscience</a> subset of the <a href="https://huggingface.co/datasets/eli5" rel="nofollow">ELI5</a> dataset.</p>
<div class="course-tip"><p>You can fine-tune other architectures for language modeling such as <a href="https://huggingface.co/EleutherAI/gpt-neo-125M" rel="nofollow">GPT-Neo</a>, <a href="https://huggingface.co/EleutherAI/gpt-j-6B" rel="nofollow">GPT-J</a>, and <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a>, following the same steps presented in this guide!</p>
<p>See the text generation <a href="https://huggingface.co/tasks/text-generation" rel="nofollow">task page</a> and fill mask <a href="https://huggingface.co/tasks/fill-mask" rel="nofollow">task page</a> for more information about their associated models, datasets, and metrics.</p></div>
<h2 id="load-eli5-dataset">Load ELI5 dataset</h2>
<p>Load only the first 5000 rows of the ELI5 dataset from the 🤗 Datasets library since it is pretty large:</p>
<pre>>>> from datasets import load_dataset

>>> eli5 = load_dataset("eli5", split="train_asks[:5000]")</pre>
<p>Split this dataset into a train and test set:</p>
<pre>eli5 = eli5.train_test_split(test_size=0.2)</pre>
<p>Then take a look at an example:</p>
<pre>>>> eli5["train"][0]
{'answers': {'a_id': ['c3d1aib', 'c3d4lya'],
  'score': [6, 3],
  'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.",
   "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]},
 'answers_urls': {'url': []},
 'document': '',
 'q_id': 'nyxfp',
 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?',
 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']},
 'subreddit': 'askscience',
 'title': 'Few questions about this space walk photograph.',
 'title_urls': {'url': []}}</pre>
<p>Notice <code>text</code> is a subfield nested inside the <code>answers</code> dictionary.
When you preprocess the dataset, you will need to extract the <code>text</code> subfield into a separate column.</p>
<h2 id="preprocess">Preprocess</h2>
<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/ma1TrR7gE7I" title="YouTube video player" allowfullscreen></iframe>
<p>For causal language modeling, load the DistilGPT2 tokenizer to process the <code>text</code> subfield:</p>
<pre>>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")</pre>
<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/8PmhEIXhBvI" title="YouTube video player" allowfullscreen></iframe>
<p>For masked language modeling, load the DistilRoBERTa tokenizer instead:</p>
<pre>>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")</pre>
<p>Extract the <code>text</code> subfield from its nested structure with the <a href="https://huggingface.co/docs/datasets/process.html#flatten" rel="nofollow"><code>flatten</code></a> method:</p>
<pre>>>> eli5 = eli5.flatten()
>>> eli5["train"][0]
{'answers.a_id': ['c3d1aib', 'c3d4lya'],
 'answers.score': [6, 3],
 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.",
  "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"],
 'answers_urls.url': [],
 'document': '',
 'q_id': 'nyxfp',
 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?',
 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'],
 'subreddit': 'askscience',
 'title': 'Few questions about this space walk photograph.',
 'title_urls.url': []}</pre>
<p>Each subfield is now a separate column as indicated by the <code>answers</code> prefix. Notice that <code>answers.text</code> is a list.
Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them.</p>
<p>Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2’s maximum input length:</p>
<pre>>>> def preprocess_function(examples):
...     return tokenizer([" ".join(x) for x in examples["answers.text"]], truncation=True)</pre>
<p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once and increasing the number of processes with <code>num_proc</code>. Remove the columns you don’t need:</p>
<pre>>>> tokenized_eli5 = eli5.map(
...     preprocess_function,
...     batched=True,
...     num_proc=4,
...     remove_columns=eli5["train"].column_names,
... )</pre>
<p>Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. This preprocessing function should:</p>
<ul><li>Concatenate all the text.</li>
<li>Split the concatenated text into smaller chunks defined by <code>block_size</code>.</li></ul>
<pre>>>> block_size = 128


>>> def group_texts(examples):
...     concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
...     total_length = len(concatenated_examples[list(examples.keys())[0]])
...     total_length = (total_length // block_size) * block_size
...     result = {
...         k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
...         for k, t in concatenated_examples.items()
...     }
...     result["labels"] = result["input_ids"].copy()
...     return result</pre>
<p>Apply the <code>group_texts</code> function over the entire dataset:</p>
<pre>>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)</pre>
<p>For causal language modeling, use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a> to create a batch of examples. It will also <em>dynamically pad</em> your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.
</p>
<div class="framework-content"><p><strong>Pytorch</strong></p>
<p>You can use the end of sequence token as the padding token, and set <code>mlm=False</code>. This will use the inputs as labels shifted to the right by one element:</p>
<pre>>>> from transformers import DataCollatorForLanguageModeling

>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)</pre>
<p>For masked language modeling, use the same <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a> except you should specify <code>mlm_probability</code> to randomly mask tokens each time you iterate over the data.</p>
<pre>>>> from transformers import DataCollatorForLanguageModeling

>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)</pre>
</div>
<div class="framework-content"><p><strong>TensorFlow</strong></p>
<p>You can use the end of sequence token as the padding token, and set <code>mlm=False</code>.
This will use the inputs as labels shifted to the right by one element:</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>For masked language modeling, use the same <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a> except you should specify <code>mlm_probability</code> to randomly mask tokens each time you iterate over the data.</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=<span class="hljs-number">0.15</span>, 
return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="causal-language-modeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#causal-language-modeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Causal language modeling </span></h2> <p>Causal language modeling is frequently used for text generation. This section shows you how to fine-tune <a href="https://huggingface.co/distilgpt2" rel="nofollow">DistilGPT2</a> to generate new text.</p> <h3 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path 
d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load DistilGPT2 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForCausalLM">AutoModelForCausalLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a 
href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span 
class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilGPT2 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForCausalLM">TFAutoModelForCausalLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div 
class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="masked-language-modeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#masked-language-modeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Masked language modeling </span></h2> <p>Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. 
This section shows you how to fine-tune <a href="https://huggingface.co/distilroberta-base" rel="nofollow">DistilRoBERTa</a> to predict a masked word.</p> <h3 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"></a> <span>Train </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load DistilRoBERTa with <code>AutoModelForMaskedLM</code>:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 
0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilRoBERTa with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForMaskedLM">TFAutoModelForMaskedLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> TFAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 
dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for causal or masked language modeling, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="1r303t4"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1r303t4"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/language_modeling.mdx-hf-doc-builder.js") ], params: {} } }); </script>
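<p>As a quick sanity check, which is not part of the guide or the linked notebooks, you can prompt the fine-tuned causal model directly. The sketch below assumes <code>model</code> and <code>tokenizer</code> are the DistilGPT2 objects from the PyTorch causal language modeling section above; the prompt text and decoding settings are only illustrative:</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># minimal generation sketch; any short prompt works here</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Somatic hypermutation allows the immune system to&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(**inputs, max_new_tokens=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">True</span>, top_p=<span class="hljs-number">0.9</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>print(tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>))<!-- HTML_TAG_END --></pre></div> <p>Sampling with <code>top_p</code> keeps the generations varied; drop <code>do_sample=True</code> if you prefer deterministic, greedy output.</p>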
59
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/semantic_segmentation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;semantic-segmentation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-sceneparse150-dataset&quot;,&quot;title&quot;:&quot;Load SceneParse150 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;},{&quot;local&quot;:&quot;inference&quot;,&quot;title&quot;:&quot;Inference&quot;}],&quot;title&quot;:&quot;Semantic segmentation&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/semantic_segmentation.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/DocNotebookDropdown-hf-doc-builder.js"> <h1 class="relative group"><a id="semantic-segmentation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#semantic-segmentation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Semantic segmentation </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/dKE8SIt9C-w" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; 
picture-in-picture" allowfullscreen></iframe> <p>Semantic segmentation assigns a label or class to each individual pixel of an image. There are several types of segmentation, and in the case of semantic segmentation, no distinction is made between unique instances of the same object. Both objects are given the same label (for example, “car” instead of “car-1” and “car-2”). Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery.</p> <p>This guide will show you how to finetune <a href="https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer" rel="nofollow">SegFormer</a> on the <a href="https://huggingface.co/datasets/scene_parse_150" rel="nofollow">SceneParse150</a> dataset.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the image segmentation <a href="https://huggingface.co/tasks/image-segmentation" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <p>Before you begin, make sure you have all the necessary libraries installed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install -q datasets transformers evaluate<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="load-sceneparse150-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-sceneparse150-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
<h2 id="load-sceneparse150-dataset">Load SceneParse150 dataset</h2>

<p>Load the first 50 examples of the SceneParse150 dataset from the 🤗 Datasets library so you can quickly train and test a model:</p>

<pre>>>> from datasets import load_dataset

>>> ds = load_dataset("scene_parse_150", split="train[:50]")</pre>

<p>Split this dataset into a train and test set:</p>

<pre>>>> ds = ds.train_test_split(test_size=0.2)
>>> train_ds = ds["train"]
>>> test_ds = ds["test"]</pre>

<p>Then take a look at an example:</p>

<pre>>>> train_ds[0]
{'image': &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at 0x7F9B0C201F90&gt;,
 'annotation': &lt;PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at 0x7F9B0C201DD0&gt;,
 'scene_category': 368}</pre>

<p>There is an <code>image</code>, an <code>annotation</code> (this is the segmentation map or label), and a <code>scene_category</code> field that describes the image scene, like “kitchen” or “office”. In this guide, you’ll only need <code>image</code> and <code>annotation</code>, both of which are PIL images.</p>
<p>You’ll also want to create a dictionary that maps a label id to a label class, which will be useful when you set up the model later. Download the mappings from the Hub and create the <code>id2label</code> and <code>label2id</code> dictionaries:</p>

<pre>>>> import json
>>> from huggingface_hub import cached_download, hf_hub_url

>>> repo_id = "huggingface/label-files"
>>> filename = "ade20k-id2label.json"
>>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
>>> id2label = {int(k): v for k, v in id2label.items()}
>>> label2id = {v: k for k, v in id2label.items()}
>>> num_labels = len(id2label)</pre>
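<p>As a quick, optional sanity check, you can print the number of labels and a few entries of the mapping; the exact class names come from the downloaded file:</p>

<pre>>>> print(num_labels)
>>> print(list(id2label.items())[:3])</pre>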
fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Next, load a SegFormer feature extractor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. However, the background class isn’t included in the 150 classes, so you’ll need to set <code>reduce_labels=True</code> to subtract one from all the labels. The zero-index is replaced by <code>255</code> so it’s ignored by SegFormer’s loss function:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;nvidia/mit-b0&quot;</span>, reduce_labels=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. 
<p>It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. In this guide, you’ll use the <a href="https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html"><code>ColorJitter</code></a> function from <a href="https://pytorch.org/vision/stable/index.html">torchvision</a> to randomly change the color properties of an image:</p>

<pre>>>> from torchvision.transforms import ColorJitter

>>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)</pre>
<p>Now create two preprocessing functions to prepare the images and annotations for the model. These functions convert the images into <code>pixel_values</code> and annotations into <code>labels</code>. For the training set, <code>jitter</code> is applied before the images are passed to the feature extractor. For the test set, the feature extractor resizes and normalizes the <code>images</code> and only resizes the <code>labels</code>, because no data augmentation is applied during testing.</p>

<pre>>>> def train_transforms(example_batch):
...     images = [jitter(x) for x in example_batch["image"]]
...     labels = [x for x in example_batch["annotation"]]
...     inputs = feature_extractor(images, labels)
...     return inputs


>>> def val_transforms(example_batch):
...     images = [x for x in example_batch["image"]]
...     labels = [x for x in example_batch["annotation"]]
...     inputs = feature_extractor(images, labels)
...     return inputs</pre>
<p>To apply the <code>jitter</code> over the entire dataset, use the 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.set_transform">set_transform</a> function. The transform is applied on the fly, which is faster and consumes less disk space:</p>

<pre>>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)</pre>

<h2 id="train">Train</h2>

<p>Load SegFormer with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSemanticSegmentation">AutoModelForSemanticSegmentation</a>, and pass the model the mapping between label ids and label classes:</p>
<pre>>>> from transformers import AutoModelForSemanticSegmentation

>>> pretrained_model_name = "nvidia/mit-b0"
>>> model = AutoModelForSemanticSegmentation.from_pretrained(
...     pretrained_model_name, id2label=id2label, label2id=label2id
... )</pre>

<div class="course-tip"><p>If you aren’t familiar with finetuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div>

<p>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>. It is important not to remove unused columns because this will drop the <code>image</code> column. Without the <code>image</code> column, you can’t create <code>pixel_values</code>. Set <code>remove_unused_columns=False</code> to prevent this behavior!</p>

<p>To save and push a model under your namespace to the Hub, set <code>push_to_hub=True</code>:</p>

<pre>>>> from transformers import TrainingArguments

>>> training_args = TrainingArguments(
...     output_dir="segformer-b0-scene-parse-150",
...     learning_rate=6e-5,
...     num_train_epochs=50,
...     per_device_train_batch_size=2,
...     per_device_eval_batch_size=2,
...     save_total_limit=3,
...     evaluation_strategy="steps",
...     save_strategy="steps",
...     save_steps=20,
...     eval_steps=20,
...     logging_steps=1,
...     eval_accumulation_steps=5,
...     remove_unused_columns=False,
...     push_to_hub=True,
... )</pre>
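<p>Pushing checkpoints with <code>push_to_hub=True</code> assumes you are authenticated with the Hugging Face Hub. If you aren’t logged in yet, one way to do it from a notebook is with the standard <code>huggingface_hub</code> login helper:</p>

<pre>>>> from huggingface_hub import notebook_login

>>> notebook_login()</pre>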
<p>To evaluate model performance during training, you’ll need to create a function to compute and report metrics. For semantic segmentation, you’ll typically compute the <a href="https://huggingface.co/spaces/evaluate-metric/mean_iou">mean Intersection over Union</a> (IoU). The mean IoU measures the overlapping area between the predicted and ground truth segmentation maps.</p>

<p>Load the mean IoU from the 🤗 Evaluate library:</p>

<pre>>>> import evaluate

>>> metric = evaluate.load("mean_iou")</pre>
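<p>If you want to see what the metric returns before wiring it into training, you can call <code>compute</code> on a small toy example (the values here are made up; the arguments mirror the ones used in the metrics function below):</p>

<pre>>>> import numpy as np

>>> toy_pred = np.array([[[0, 1], [1, 1]]])  # one 2x2 predicted segmentation map
>>> toy_ref = np.array([[[0, 1], [0, 1]]])   # the matching ground truth map
>>> metric.compute(
...     predictions=toy_pred, references=toy_ref, num_labels=num_labels, ignore_index=255, reduce_labels=False
... )</pre>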
<p>Then create a function to <a href="https://huggingface.co/docs/evaluate/main/en/package_reference/main_classes#evaluate.EvaluationModule.compute">compute</a> the metrics. The model returns logits, which need to be upsampled to the size of the labels and converted to class predictions before you can call <a href="https://huggingface.co/docs/evaluate/main/en/package_reference/main_classes#evaluate.EvaluationModule.compute">compute</a>:</p>

<pre>>>> import numpy as np
>>> import torch
>>> from torch import nn

>>> def compute_metrics(eval_pred):
...     with torch.no_grad():
...         logits, labels = eval_pred
...         logits_tensor = torch.from_numpy(logits)
...         # scale the logits to the size of the label maps
...         logits_tensor = nn.functional.interpolate(
...             logits_tensor,
...             size=labels.shape[-2:],
...             mode="bilinear",
...             align_corners=False,
...         ).argmax(dim=1)

...         pred_labels = logits_tensor.detach().cpu().numpy()
...         metrics = metric.compute(
...             predictions=pred_labels,
...             references=labels,
...             num_labels=num_labels,
...             ignore_index=255,
...             reduce_labels=False,
...         )
...         # convert numpy arrays in the results to lists so they can be logged
...         for key, value in metrics.items():
...             if type(value) is np.ndarray:
...                 metrics[key] = value.tolist()
...         return metrics</pre>
</span> <span class="hljs-keyword">return</span> metrics<!-- HTML_TAG_END --></pre></div> <p>Pass your model, training arguments, datasets, and metrics function to the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=train_ds, <span class="hljs-meta">... </span> eval_dataset=test_ds, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
<p>Lastly, call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to finetune your model:</p>

<pre>>>> trainer.train()</pre>
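<p>Since <code>push_to_hub=True</code> was set in the training arguments, checkpoints are uploaded to the Hub during training. If you also want to push the final model and a model card once training is done, you can do so explicitly (this assumes you are logged in to the Hub):</p>

<pre>>>> trainer.push_to_hub()</pre>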
<h2 id="inference">Inference</h2>

<p>Great, now that you’ve finetuned a model, you can use it for inference!</p>

<p>Load an image for inference:</p>

<pre>>>> image = ds[0]["image"]  # if ds has already been split and transformed above, reload the raw split with load_dataset first
>>> image</pre>

<div class="flex justify-center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="Image of bedroom"></div>

<p>Process the image with the feature extractor and place the <code>pixel_values</code> on a GPU if one is available:</p>

<pre>>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU if available, otherwise use a CPU
>>> encoding = feature_extractor(image, return_tensors="pt")
>>> pixel_values = encoding.pixel_values.to(device)</pre>
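<p>Because the pixel values are moved to <code>device</code>, the model has to be on the same device before the forward pass. If you just finished training on a GPU this is already the case; otherwise move it explicitly:</p>

<pre>>>> model = model.to(device)</pre>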
<p>Pass your input to the model and return the <code>logits</code>:</p>

<pre>>>> outputs = model(pixel_values=pixel_values)
>>> logits = outputs.logits.cpu()</pre>

<p>Next, rescale the logits to the original image size:</p>

<pre>>>> upsampled_logits = nn.functional.interpolate(
...     logits,
...     size=image.size[::-1],
...     mode="bilinear",
...     align_corners=False,
... )

>>> pred_seg = upsampled_logits.argmax(dim=1)[0]</pre>

<p>To visualize the results, load the <a href="https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51">dataset color palette</a> that maps each class to its RGB values.</p>
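<p>The plotting code below calls an <code>ade_palette()</code> helper that returns one RGB triplet per class; the linked colormap defines the official ADE20K colors. If you don’t want to copy the full palette, a minimal stand-in that simply generates reproducible distinct colors could look like this (a hypothetical helper, not the official palette):</p>

<pre>import numpy as np

def ade_palette():
    """Return one RGB triplet per class (a stand-in for the official ADE20K palette)."""
    rng = np.random.default_rng(seed=0)  # fixed seed so the colors stay the same across runs
    return rng.integers(0, 256, size=(num_labels, 3)).tolist()</pre>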
<p>Then you can combine and plot your image and the predicted segmentation map:</p>

<pre>>>> import matplotlib.pyplot as plt

>>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
>>> palette = np.array(ade_palette())
>>> for label, color in enumerate(palette):
...     color_seg[pred_seg == label, :] = color
>>> color_seg = color_seg[..., ::-1]  # convert to BGR

>>> img = np.array(image) * 0.5 + color_seg * 0.5  # plot the image with the segmentation map
>>> img = img.astype(np.uint8)

>>> plt.figure(figsize=(15, 10))
>>> plt.imshow(img)
>>> plt.show()</pre>

<div class="flex justify-center"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png" alt="Image of bedroom overlayed with segmentation map"></div>
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/sequence_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;text-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-imdb-dataset&quot;,&quot;title&quot;:&quot;Load IMDb dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Text classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="text-classification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#text-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Text classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/leNG9fN9FQU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today’s largest companies. One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/imdb" rel="nofollow">IMDb</a> dataset to determine whether a movie review is positive or negative.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the text classification <a href="https://huggingface.co/tasks/text-classification" rel="nofollow">task page</a> for more information about other forms of text classification and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-imdb-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-imdb-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load IMDb dataset </span></h2> <p>Load the IMDb dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
<pre>>>> imdb["test"][0]
{
    "label": 0,
    "text": "I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi' setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV. It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It's really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it's rubbish as they have to always say \"Gene Roddenberry's Earth...\" otherwise people would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.",
}</pre>

<p>There are two fields in this dataset:</p>

<ul><li><code>text</code>: a string containing the text of the movie review.</li>
<li><code>label</code>: a value that can either be <code>0</code> for a negative review or <code>1</code> for a positive review.</li></ul>

<h2 id="preprocess">Preprocess</h2>

<p>Load the DistilBERT tokenizer to process the <code>text</code> field:</p>

<pre>>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")</pre>

<p>Create a preprocessing function to tokenize <code>text</code> and truncate sequences to be no longer than DistilBERT’s maximum input length:</p>
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples. It will also <em>dynamically pad</em> your text to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 
5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load DistilBERT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along 
with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p><a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> will apply dynamic padding by default when you pass <code>tokenizer</code> to it. 
In this case, you don’t need to specify a data collator explicitly.</p></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span 
class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification">TFAutoModelForSequenceClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="thq86"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="thq86"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
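<p>Once training is complete, you can try the model on a new review. The following is a minimal sketch, assuming the PyTorch <code>model</code> and <code>tokenizer</code> from the example above are still in memory; the review text is only an illustration:</p> <div class="code-block relative"> <pre><!-- HTML_TAG_START -->&gt;&gt;&gt; import torch

&gt;&gt;&gt; text = "This was a surprisingly heartfelt film, and I would happily watch it again."
&gt;&gt;&gt; inputs = tokenizer(text, return_tensors="pt")
&gt;&gt;&gt; with torch.no_grad():
...     logits = model(**inputs).logits
&gt;&gt;&gt; predicted_class_id = logits.argmax().item()
&gt;&gt;&gt; predicted_class_id  # 1 is a positive review, 0 is a negative review
<!-- HTML_TAG_END --></pre></div>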
61
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/translation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;translation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-opus-books-dataset&quot;,&quot;title&quot;:&quot;Load OPUS Books dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Translation&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/translation.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="translation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#translation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Translation </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/1JvfrvZgi6c" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Translation converts a sequence of text from one language to another. It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/t5-small" rel="nofollow">T5</a> on the English-French subset of the <a href="https://huggingface.co/datasets/opus_books" rel="nofollow">OPUS Books</a> dataset to translate English text to French.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the translation <a href="https://huggingface.co/tasks/translation" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-opus-books-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-opus-books-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load OPUS Books dataset </span></h2> <p>Load the OPUS Books dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>books = load_dataset(<span class="hljs-string">&quot;opus_books&quot;</span>, <span class="hljs-string">&quot;en-fr&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center 
relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->books = books[<span class="hljs-string">&quot;train&quot;</span>].train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>books[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;90560&#x27;</span>, <span class="hljs-string">&#x27;translation&#x27;</span>: {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-string">&#x27;But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.&#x27;</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-string">&#x27;Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.&#x27;</span>}}<!-- HTML_TAG_END --></pre></div> <p>The <code>translation</code> field is a dictionary containing the English and French translations of the text.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/XAR8jnZZuUs" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Load the T5 tokenizer to process the language pairs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks.</li> <li>Tokenize the input (English) and target (French) separately. You can’t tokenize French text with a tokenizer pretrained on an English vocabulary. 
Passing the French text to the tokenizer’s <code>text_target</code> parameter makes sure it is tokenized as the target labels.</li> <li>Truncate sequences to be no longer than the maximum length set by the <code>max_length</code> parameter.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>source_lang = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_lang = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;translate English to French: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> inputs = [prefix + example[source_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> targets = [example[target_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, text_target=targets, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> model_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_books = books.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 
<div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><span>Pytorch</span> <div class="framework-content"> <p>Load T5 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM">AutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><pre>>>> from transformers import AutoModelForSeq2SeqLM

>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")</pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><span>TensorFlow</span> <div class="framework-content"> <p>Load T5 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM">TFAutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><pre>>>> from transformers import TFAutoModelForSeq2SeqLM

>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")</pre></div></div></div> </div> <p>Use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq">DataCollatorForSeq2Seq</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><span>Pytorch</span> <div class="framework-content"> <div class="code-block relative"><pre>>>> from transformers import DataCollatorForSeq2Seq

>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)</pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><span>TensorFlow</span> <div class="framework-content"> <div class="code-block relative"><pre>>>> from transformers import DataCollatorForSeq2Seq

>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf")</pre></div></div></div> </div>
<h2 class="relative group"><a id="train" class="header-link" href="#train"></a> <span>Train</span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><span>Pytorch</span> <div class="framework-content"> <div class="course-tip"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments">Seq2SeqTrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainer">Seq2SeqTrainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><pre>>>> from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer

>>> training_args = Seq2SeqTrainingArguments(
...     output_dir="./results",
...     evaluation_strategy="epoch",
...     learning_rate=2e-5,
...     per_device_train_batch_size=16,
...     per_device_eval_batch_size=16,
...     weight_decay=0.01,
...     save_total_limit=3,
...     num_train_epochs=1,
...     fp16=True,
... )

>>> trainer = Seq2SeqTrainer(
...     model=model,
...     args=training_args,
...     train_dataset=tokenized_books["train"],
...     eval_dataset=tokenized_books["test"],
...     tokenizer=tokenizer,
...     data_collator=data_collator,
... )

>>> trainer.train()</pre></div></div></div>
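<p>Still on the PyTorch side, a hedged follow-up you may find useful once training finishes: <code>Trainer</code> exposes <code>evaluate()</code> and <code>save_model()</code>, so you can score the evaluation split and persist the weights. The output path below is only an example, not something this guide prescribes.</p> <div class="code-block relative"><pre># Illustrative only: evaluate and save after training
metrics = trainer.evaluate()
print(metrics)                         # evaluation loss (plus any metrics you configured)
trainer.save_model("./results/final")  # illustrative output directory</pre></div>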
<div class="border border-gray-200 rounded-xl px-4 relative"><span>TensorFlow</span> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><pre>>>> tf_train_set = model.prepare_tf_dataset(
...     tokenized_books["train"],
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
... )

>>> tf_test_set = model.prepare_tf_dataset(
...     tokenized_books["test"],
...     shuffle=False,
...     batch_size=16,
...     collate_fn=data_collator,
... )</pre></div>
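<p>If you want to confirm what these pipelines will feed the model, a small optional check (not required by the guide) is to print the dataset's element structure:</p> <div class="code-block relative"><pre>>>> tf_train_set.element_spec  # shapes and dtypes of one batch produced by prepare_tf_dataset</pre></div>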
<div class="course-tip"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><pre>>>> from transformers import create_optimizer, AdamWeightDecay

>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)</pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p>
class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="1b5muk3"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1b5muk3"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/translation.mdx-hf-doc-builder.js") ], params: {} } }); </script>
62
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/audio_classification.html
<meta charset="utf-8" /><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;audio-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-minds14-dataset&quot;,&quot;title&quot;:&quot;Load MInDS-14 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Audio classification&quot;}" data-svelte="svelte-1phssyn"> <h1 class="relative group"><a id="audio-classification" class="header-link" href="#audio-classification"></a> <span>Audio classification</span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/KWwzcmG98Ds" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. 
Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/facebook/wav2vec2-base" rel="nofollow">Wav2Vec2</a> on the <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> dataset to classify speaker intent.</p> <div class="course-tip"><p>See the audio classification <a href="https://huggingface.co/tasks/audio-classification" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-minds14-dataset" class="header-link" href="#load-minds14-dataset"></a> <span>Load MInDS-14 dataset</span></h2> <p>Load the <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><pre>>>> from datasets import load_dataset, Audio

>>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train")</pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><pre>>>> minds = minds.train_test_split(test_size=0.2)</pre></div> <p>Then take a look at the dataset:</p> <div class="code-block relative"><pre>>>> minds
DatasetDict({
    train: Dataset({
        features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'],
        num_rows: 450
    })
    test: Dataset({
        features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'],
        num_rows: 113
    })
})</pre></div>
class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">113</span> }) })<!-- HTML_TAG_END --></pre></div> <p>While the dataset contains a lot of other useful information, like <code>lang_id</code> and <code>english_transcription</code>, you will focus on the <code>audio</code> and <code>intent_class</code> in this guide. Remove the other columns:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.remove_columns([<span class="hljs-string">&quot;path&quot;</span>, <span class="hljs-string">&quot;transcription&quot;</span>, <span class="hljs-string">&quot;english_transcription&quot;</span>, <span class="hljs-string">&quot;lang_id&quot;</span>])<!-- HTML_TAG_END --></pre></div> <p>Take a look at an example now:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., -<span 
class="hljs-number">0.00048828</span>, -<span class="hljs-number">0.00024414</span>, -<span class="hljs-number">0.00024414</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">8000</span>}, <span class="hljs-string">&#x27;intent_class&#x27;</span>: <span class="hljs-number">2</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>audio</code> column contains a 1-dimensional <code>array</code> of the speech signal that must be called to load and resample the audio file. The <code>intent_class</code> column is an integer that represents the class id of intent. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>labels = minds[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;intent_class&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... 
</span> id2label[<span class="hljs-built_in">str</span>(i)] = label<!-- HTML_TAG_END --></pre></div> <p>Now you can convert the label number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">2</span>)] <span class="hljs-string">&#x27;app_error&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Each keyword - or label - corresponds to a number; <code>2</code> indicates <code>app_error</code> in the example above.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the Wav2Vec2 feature extractor to process the audio signal:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
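<p>A small, optional sketch (assuming the <code>labels</code> and <code>id2label</code> objects created above): counting the classes now is handy, since an audio classification head needs to know how many labels it predicts.</p> <div class="code-block relative"><pre>>>> num_labels = len(id2label)
>>> num_labels   # number of intent classes in the dataset
>>> labels[:3]   # a few of the human-readable label names</pre></div>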
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">2.2098757e-05</span>, <span class="hljs-number">4.6582241e-05</span>, -<span class="hljs-number">2.2803260e-05</span>, ..., -<span class="hljs-number">2.8419291e-04</span>, -<span class="hljs-number">2.3305941e-04</span>, -<span class="hljs-number">1.1425107e-04</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;intent_class&#x27;</span>: <span class="hljs-number">2</span>}<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Call the 
<code>audio</code> column to load and if necessary resample the audio file.</li> <li>Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 <a href="https://huggingface.co/facebook/wav2vec2-base" rel="nofollow">model card</a>.</li> <li>Set a maximum input length so longer inputs are batched without being truncated.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> audio_arrays = [x[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;audio&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor( <span class="hljs-meta">... </span> audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=<span class="hljs-number">16000</span>, truncation=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once. 
Remove the columns you don’t need, and rename <code>intent_class</code> to <code>label</code> because that is what the model expects:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = minds.<span class="hljs-built_in">map</span>(preprocess_function, remove_columns=<span class="hljs-string">&quot;audio&quot;</span>, batched=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = encoded_minds.rename_column(<span class="hljs-string">&quot;intent_class&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load Wav2Vec2 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForAudioClassification">AutoModelForAudioClassification</a>. 
Specify the number of labels, and pass the model the mapping between label number and label class:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForAudioClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, num_labels=num_labels, label2id=label2id, id2label=id2label <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and feature extractor.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> save_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">3e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=encoded_minds[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=encoded_minds[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb" rel="nofollow">PyTorch notebook</a>.</p></div> <script type="module" data-hydrate="17mx7bk"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="17mx7bk"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/audio_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
63
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/image_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;image-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-food101-dataset&quot;,&quot;title&quot;:&quot;Load Food-101 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Image classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/image_classification.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="image-classification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#image-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Image classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/tjAIM7BOYhw" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. 
There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit" rel="nofollow">ViT</a> on the <a href="https://huggingface.co/datasets/food101" rel="nofollow">Food-101</a> dataset to classify a food item in an image.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the image classification <a href="https://huggingface.co/tasks/audio-classification" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-food101-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-food101-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load Food-101 dataset </span></h2> <p>Load only the first 5000 images of the Food-101 dataset from the 🤗 Datasets library since it is pretty large:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>food = load_dataset(<span class="hljs-string">&quot;food101&quot;</span>, split=<span 
class="hljs-string">&quot;train[:5000]&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food = food.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at <span class="hljs-number">0x7F52AFC8AC50</span>&gt;, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">79</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>image</code> field contains a PIL image, and each <code>label</code> is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>labels = food[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... 
</span> id2label[<span class="hljs-built_in">str</span>(i)] = label<!-- HTML_TAG_END --></pre></div> <p>Now you can convert the label number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">79</span>)] <span class="hljs-string">&#x27;prime_rib&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Each food class - or label - corresponds to a number; <code>79</code> indicates a prime rib in the example above.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the ViT feature extractor to process the image into a tensor:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Apply several image transformations to the dataset to make the model more robust against overfitting. Here you’ll use torchvision’s <a href="https://pytorch.org/vision/stable/transforms.html" rel="nofollow"><code>transforms</code></a> module. Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> RandomResizedCrop, Compose, Normalize, ToTensor <span class="hljs-meta">&gt;&gt;&gt; </span>normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) <span class="hljs-meta">&gt;&gt;&gt; </span>_transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])<!-- HTML_TAG_END --></pre></div> <p>Create a preprocessing function that will apply the transforms and return the <code>pixel_values</code> - the inputs to the model - of the image:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">transforms</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;pixel_values&quot;</span>] = [_transforms(img.convert(<span class="hljs-string">&quot;RGB&quot;</span>)) <span class="hljs-keyword">for</span> img <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> <span class="hljs-keyword">del</span> examples[<span class="hljs-string">&quot;image&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> examples<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Dataset’s <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.with_transform" rel="nofollow">with_transform</a> method to apply the transforms over the entire dataset. The transforms are applied on-the-fly when you load an element of the dataset:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food = food.with_transform(transforms)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DefaultDataCollator">DefaultDataCollator</a> to create a batch of examples. 
Unlike other data collators in 🤗 Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 
text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load ViT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForImageClassification">AutoModelForImageClassification</a>. Specify the number of labels, and pass the model the mapping between label number and label class:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-meta">... </span> num_labels=<span class="hljs-built_in">len</span>(labels), <span class="hljs-meta">... </span> id2label=id2label, <span class="hljs-meta">... </span> label2id=label2id, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>. It is important you don’t remove unused columns because this will drop the <code>image</code> column. Without the <code>image</code> column, you can’t create <code>pixel_values</code>. Set <code>remove_unused_columns=False</code> to prevent this behavior!</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> save_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> eval_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> logging_steps=<span class="hljs-number">10</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-4</span>, <span class="hljs-meta">... 
</span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> remove_unused_columns=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span> train_dataset=food[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=food[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb" rel="nofollow">PyTorch notebook</a>.</p></div> <script type="module" data-hydrate="1ibz34p"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1ibz34p"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/image_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
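<p>After training, you can sanity-check the fine-tuned checkpoint on a single image with the image classification pipeline. This is a minimal sketch, not part of the original guide: the checkpoint directory and the image path are placeholders to replace with your own.</p>

<pre>>>> from transformers import pipeline

>>> # Hypothetical paths: use a checkpoint saved under output_dir and any local image file
>>> classifier = pipeline("image-classification", model="./results/checkpoint-100")
>>> classifier("path/to/food.jpg")  # returns a list of {"score": ..., "label": ...} dicts</pre>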
64
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/summarization.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;summarization&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-billsum-dataset&quot;,&quot;title&quot;:&quot;Load BillSum dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Summarization&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/summarization.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="summarization" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#summarization"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Summarization </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/yHnr5Dk2zCI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. Summarization can be:</p> <ul><li>Extractive: extract the most relevant information from a document.</li> <li>Abstractive: generate new text that captures the most relevant information. 
</li></ul> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/t5-small" rel="nofollow">T5</a> on the California state bill subset of the <a href="https://huggingface.co/datasets/billsum" rel="nofollow">BillSum</a> dataset for abstractive summarization.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the summarization <a href="https://huggingface.co/tasks/summarization" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-billsum-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-billsum-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load BillSum dataset </span></h2> <p>Load the BillSum dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>billsum = load_dataset(<span class="hljs-string">&quot;billsum&quot;</span>, split=<span class="hljs-string">&quot;ca_test&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center 
relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>billsum = billsum.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>billsum[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;summary&#x27;</span>: <span class="hljs-string">&#x27;Existing law authorizes state agencies to enter into contracts for the acquisition of goods or services upon approval by the Department of General Services. Existing law sets forth various requirements and prohibitions for those contracts, including, but not limited to, a prohibition on entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between spouses and domestic partners or same-sex and different-sex couples in the provision of benefits. Existing law provides that a contract entered into in violation of those requirements and prohibitions is void and authorizes the state or any person acting on behalf of the state to bring a civil action seeking a determination that a contract is in violation and therefore void. 
Under existing law, a willful violation of those requirements and prohibitions is a misdemeanor.\nThis bill would also prohibit a state agency from entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between employees on the basis of gender identity in the provision of benefits, as specified. By expanding the scope of a crime, this bill would impose a state-mandated local program.\nThe California Constitution requires the state to reimburse local agencies and school districts for certain costs mandated by the state. Statutory provisions establish procedures for making that reimbursement.\nThis bill would provide that no reimbursement is required by this act for a specified reason.&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;The people of the State of California do enact as follows:\n\n\nSECTION 1.\nSection 10295.35 is added to the Public Contract Code, to read:\n10295.35.\n(a) (1) Notwithstanding any other law, a state agency shall not enter into any contract for the acquisition of goods or services in the amount of one hundred thousand dollars ($100,000) or more with a contractor that, in the provision of benefits, discriminates between employees on the basis of an employee’s or dependent’s actual or perceived gender identity, including, but not limited to, the employee’s or dependent’s identification as transgender.\n(2) For purposes of this section, “contract” includes contracts with a cumulative amount of one hundred thousand dollars ($100,000) or more per contractor in each fiscal year.\n(3) For purposes of this section, an employee health plan is discriminatory if the plan is not consistent with Section 1365.5 of the Health and Safety Code and Section 10140 of the Insurance Code.\n(4) The requirements of this section shall apply only to those portions of a contractor’s operations that occur under any of the following conditions:\n(A) Within the state.\n(B) On real property outside the state if the property is owned by the state or if the state has a right to occupy the property, and if the contractor’s presence at that location is connected to a contract with the state.\n(C) Elsewhere in the United States where work related to a state contract is being performed.\n(b) Contractors shall treat as confidential, to the maximum extent allowed by law or by the requirement of the contractor’s insurance provider, any request by an employee or applicant for employment benefits or any documentation of eligibility for benefits submitted by an employee or applicant for employment.\n(c) After taking all reasonable measures to find a contractor that complies with this section, as determined by the state agency, the requirements of this section may be waived under any of the following circumstances:\n(1) There is only one prospective contractor willing to enter into a specific contract with the state agency.\n(2) The contract is necessary to respond to an emergency, as determined by the state agency, that endangers the public health, welfare, or safety, or the contract is necessary for the provision of essential services, and no entity that complies with the requirements of this section capable of responding to the emergency is immediately available.\n(3) The requirements of this section violate, or are inconsistent with, the terms or conditions of a grant, subvention, or agreement, if the agency has made a good faith attempt to change the terms or conditions of any grant, 
subvention, or agreement to authorize application of this section.\n(4) The contractor is providing wholesale or bulk water, power, or natural gas, the conveyance or transmission of the same, or ancillary services, as required for ensuring reliable services in accordance with good utility practice, if the purchase of the same cannot practically be accomplished through the standard competitive bidding procedures and the contractor is not providing direct retail services to end users.\n(d) (1) A contractor shall not be deemed to discriminate in the provision of benefits if the contractor, in providing the benefits, pays the actual costs incurred in obtaining the benefit.\n(2) If a contractor is unable to provide a certain benefit, despite taking reasonable measures to do so, the contractor shall not be deemed to discriminate in the provision of benefits.\n(e) (1) Every contract subject to this chapter shall contain a statement by which the contractor certifies that the contractor is in compliance with this section.\n(2) The department or other contracting agency shall enforce this section pursuant to its existing enforcement powers.\n(3) (A) If a contractor falsely certifies that it is in compliance with this section, the contract with that contractor shall be subject to Article 9 (commencing with Section 10420), unless, within a time period specified by the department or other contracting agency, the contractor provides to the department or agency proof that it has complied, or is in the process of complying, with this section.\n(B) The application of the remedies or penalties contained in Article 9 (commencing with Section 10420) to a contract subject to this chapter shall not preclude the application of any existing remedies otherwise available to the department or other contracting agency under its existing enforcement powers.\n(f) Nothing in this section is intended to regulate the contracting practices of any local jurisdiction.\n(g) This section shall be construed so as not to conflict with applicable federal laws, rules, or regulations. In the event that a court or agency of competent jurisdiction holds that federal law, rule, or regulation invalidates any clause, sentence, paragraph, or section of this code or the application thereof to any person or circumstances, it is the intent of the state that the court or agency sever that clause, sentence, paragraph, or section so that the remainder of this section shall remain in effect.\nSEC. 2.\nSection 10295.35 of the Public Contract Code shall not be construed to create any new enforcement authority or responsibility in the Department of General Services or any other contracting agency.\nSEC. 
3.\nNo reimbursement is required by this act pursuant to Section 6 of Article XIII\u2009B of the California Constitution because the only costs that may be incurred by a local agency or school district will be incurred because this act creates a new crime or infraction, eliminates a crime or infraction, or changes the penalty for a crime or infraction, within the meaning of Section 17556 of the Government Code, or changes the definition of a crime within the meaning of Section 6 of Article XIII\u2009B of the California Constitution.&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;An act to add Section 10295.35 to the Public Contract Code, relating to public contracts.&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>text</code> field is the input and the <code>summary</code> field is the target.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the T5 tokenizer to process <code>text</code> and <code>summary</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs 
to:</p> <ol><li>Prefix the input with a prompt so T5 knows this is a summarization task. Some models capable of multiple NLP tasks require prompting for specific tasks.</li> <li>Use the keyword <code>text_target</code> argument when tokenizing labels.</li> <li>Truncate sequences to be no longer than the maximum length set by the <code>max_length</code> parameter.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;summarize: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> inputs = [prefix + doc <span class="hljs-keyword">for</span> doc <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;text&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">1024</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = tokenizer(text_target=examples[<span class="hljs-string">&quot;summary&quot;</span>], max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> model_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_billsum = billsum.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq">DataCollatorForSeq2Seq</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 
4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load T5 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM">AutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments">Seq2SeqTrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Seq2SeqTrainer">Seq2SeqTrainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a 
href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, 
weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load T5 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM">TFAutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="aa7k54"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="aa7k54"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/summarization.mdx-hf-doc-builder.js") ], params: {} } }); </script>
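<p>As a quick check after training, you can generate a summary for one of the held-out bills. The snippet below is a minimal sketch rather than part of the original guide: it is written for the PyTorch <code>model</code> and reuses the <code>prefix</code>, <code>tokenizer</code>, and <code>billsum</code> objects defined above, and the generation settings (<code>num_beams=4</code>, <code>max_length=128</code>) are illustrative choices only.</p> <pre>&gt;&gt;&gt; # Sketch: summarize one test bill with the fine-tuned PyTorch model from the Train section.
&gt;&gt;&gt; # Assumes `prefix`, `tokenizer`, `billsum`, and `model` are still in memory.
&gt;&gt;&gt; text = prefix + billsum[&quot;test&quot;][0][&quot;text&quot;]
&gt;&gt;&gt; inputs = tokenizer(text, max_length=1024, truncation=True, return_tensors=&quot;pt&quot;).to(model.device)
&gt;&gt;&gt; summary_ids = model.generate(**inputs, max_length=128, num_beams=4)
&gt;&gt;&gt; print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))</pre>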
65
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/asr.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;automatic-speech-recognition&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-minds14-dataset&quot;,&quot;title&quot;:&quot;Load MInDS-14 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Automatic speech recognition&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/asr.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="automatic-speech-recognition" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automatic-speech-recognition"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Automatic speech recognition </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/TksaY_FDgnk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Automatic speech recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. 
Voice assistants like Siri and Alexa utilize ASR models to assist users.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/facebook/wav2vec2-base" rel="nofollow">Wav2Vec2</a> on the <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> dataset to transcribe audio to text.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the automatic speech recognition <a href="https://huggingface.co/tasks/automatic-speech-recognition" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-minds14-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-minds14-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load MInDS-14 dataset </span></h2> <p>Load the <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>minds = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span 
class="hljs-string">&quot;train&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at the dataset:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds DatasetDict({ train: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">450</span> }) test: Dataset({ features: [<span class="hljs-string">&#x27;path&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>, <span class="hljs-string">&#x27;english_transcription&#x27;</span>, <span class="hljs-string">&#x27;intent_class&#x27;</span>, <span 
class="hljs-string">&#x27;lang_id&#x27;</span>], num_rows: <span class="hljs-number">113</span> }) })<!-- HTML_TAG_END --></pre></div> <p>While the dataset contains a lot of helpful information, like <code>lang_id</code> and <code>intent_class</code>, you will focus on the <code>audio</code> and <code>transcription</code> columns in this guide. Remove the other columns:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.remove_columns([<span class="hljs-string">&quot;english_transcription&quot;</span>, <span class="hljs-string">&quot;intent_class&quot;</span>, <span class="hljs-string">&quot;lang_id&quot;</span>])<!-- HTML_TAG_END --></pre></div> <p>Take a look at the example again:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">0.00024414</span>, <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., <span class="hljs-number">0.00024414</span>, <span 
class="hljs-number">0.00024414</span>, <span class="hljs-number">0.00024414</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">8000</span>}, <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>: <span class="hljs-string">&quot;hi I&#x27;m trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing&quot;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>audio</code> column contains a 1-dimensional <code>array</code> of the speech signal that must be called to load and resample the audio file.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the Wav2Vec2 processor to process the audio signal and transcribed text:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> dataset has a sampling rate of 8000khz. You will need to resample the dataset to use the pretrained Wav2Vec2 model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>minds = minds.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=<span class="hljs-number">16_000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>minds[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">2.38064706e-04</span>, -<span class="hljs-number">1.58618059e-04</span>, -<span class="hljs-number">5.43987835e-06</span>, ..., <span class="hljs-number">2.78103951e-04</span>, <span class="hljs-number">2.38446111e-04</span>, <span class="hljs-number">1.18740834e-04</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav&#x27;</span>, <span class="hljs-string">&#x27;transcription&#x27;</span>: <span class="hljs-string">&quot;hi I&#x27;m trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing&quot;</span>}<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Call the <code>audio</code> column to load and resample the audio file.</li> <li>Extract the <code>input_values</code> from the audio file.</li> <li>Typically, when you call the processor, you 
<p>The preprocessing function needs to:</p> <ol><li>Call the <code>audio</code> column to load and resample the audio file.</li> <li>Extract the <code>input_values</code> from the audio file.</li> <li>Tokenize the <code>transcription</code> column to create the labels. Calling the processor with the <code>audio</code> argument runs the feature extractor, and calling it with the <code>text</code> argument runs the tokenizer.</li></ol> <div class="code-block relative"><pre>>>> def prepare_dataset(batch):
...     audio = batch["audio"]
...     # extract the input_values from the resampled audio signal
...     batch["input_values"] = processor(audio=audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
...     batch["input_length"] = len(batch["input_values"])
...     # tokenize the transcription to create the labels
...     batch["labels"] = processor(text=batch["transcription"]).input_ids
...     return batch</pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with <code>num_proc</code>. 
Remove the columns you don’t need:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_minds = minds.<span class="hljs-built_in">map</span>(prepare_dataset, remove_columns=minds.column_names[<span class="hljs-string">&quot;train&quot;</span>], num_proc=<span class="hljs-number">4</span>)<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers doesn’t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <p>Unlike other data collators, this specific data collator needs to apply a different padding method to <code>input_values</code> and <code>labels</code>. 
You can apply a different padding method to each by calling <code>processor.pad()</code> separately on the audio inputs and on the labels:</p> <div class="code-block relative"><pre>>>> import torch

>>> from dataclasses import dataclass, field
>>> from typing import Any, Dict, List, Optional, Union

>>> @dataclass
... class DataCollatorCTCWithPadding:
...     processor: AutoProcessor
...     padding: Union[bool, str] = True

...     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
...         # split inputs and labels since they have to be of different lengths and need
...         # different padding methods
...         input_features = [{"input_values": feature["input_values"]} for feature in features]
...         label_features = [{"input_ids": feature["labels"]} for feature in features]

...         batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")
...         labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")

...         # replace padding with -100 to ignore these positions in the loss
...         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

...         batch["labels"] = labels

...         return batch</pre></div>
<p>Create a batch of examples and dynamically pad them with <code>DataCollatorCTCWithPadding</code>:</p> <div class="code-block relative"><pre>>>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)</pre></div>
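<p>Before training, you can sanity-check the collator by padding a couple of processed examples yourself. This is only an illustrative sketch that reuses the <code>encoded_minds</code> dataset and the <code>data_collator</code> defined above:</p> <div class="code-block relative"><pre>>>> # take two examples of different lengths and collate them into one padded batch
>>> features = [encoded_minds["train"][i] for i in range(2)]
>>> batch = data_collator(features)
>>> batch["input_values"].shape, batch["labels"].shape  # both are padded to the longest element in the batch</pre></div>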
<h2 class="relative group"><a id="train" class="header-link" href="#train"></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><span>Pytorch</span></div></div> <div class="framework-content"> <p>Load Wav2Vec2 with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForCTC">AutoModelForCTC</a>. 
For <code>ctc_loss_reduction</code>, it is often better to use the average instead of the default summation:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCTC, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, <span class="hljs-meta">... </span> ctc_loss_reduction=<span class="hljs-string">&quot;mean&quot;</span>, <span class="hljs-meta">... </span> pad_token_id=processor.tokenizer.pad_token_id, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> group_by_length=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> gradient_checkpointing=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">1e-4</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.005</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... 
</span> train_dataset=encoded_minds[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=encoded_minds[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=processor.feature_extractor, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look at this blog <a href="https://huggingface.co/blog/fine-tune-wav2vec2-english" rel="nofollow">post</a> for English ASR and this <a href="https://huggingface.co/blog/fine-tune-xlsr-wav2vec2" rel="nofollow">post</a> for multilingual ASR.</p></div>
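<p>Finally, if you also want to track the word error rate (WER) during evaluation, you can define a <code>compute_metrics</code> function and pass it to the <code>Trainer</code> above with <code>compute_metrics=compute_metrics</code>. The following is a minimal sketch, assuming the 🤗 Evaluate library is installed and the <code>processor</code> from this guide is in scope:</p> <div class="code-block relative"><pre>>>> import numpy as np
>>> import evaluate

>>> wer = evaluate.load("wer")

>>> def compute_metrics(pred):
...     # greedy decoding of the CTC logits returned by the model
...     pred_ids = np.argmax(pred.predictions, axis=-1)
...     # -100 marked padded label positions; put the pad token back so they can be decoded
...     pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
...     pred_str = processor.batch_decode(pred_ids)
...     label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
...     return {"wer": wer.compute(predictions=pred_str, references=label_str)}</pre></div>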
66
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/token_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;token-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-wnut-17-dataset&quot;,&quot;title&quot;:&quot;Load WNUT 17 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Token classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="token-classification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#token-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Token classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/wVHdVlPScxA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/wnut_17" rel="nofollow">WNUT 17</a> dataset to detect new entities.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the token classification <a href="https://huggingface.co/tasks/token-classification" rel="nofollow">task page</a> for more information about other forms of token classification and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-wnut-17-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-wnut-17-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load WNUT 17 dataset </span></h2> <p>Load the WNUT 17 dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span 
class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }<!-- HTML_TAG_END --></pre></div> <p>Each number in <code>ner_tags</code> represents an entity. Convert the number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]<!-- HTML_TAG_END --></pre></div> <p>The <code>ner_tag</code> describes an entity, such as a corporation, location, or person. 
The letter that prefixes each <code>ner_tag</code> indicates the token position of the entity:</p> <ul><li><code>B-</code> indicates the beginning of an entity.</li> <li><code>I-</code> indicates a token is contained inside the same entity (e.g., the <code>State</code> token is part of an entity like <code>Empire State Building</code>).</li> <li><code>O</code> indicates the token doesn’t correspond to any entity.</li></ul> <h2 class="relative group"><a id="preprocess" class="header-link" href="#preprocess"></a> <span>Preprocess </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/iY2AZYdZAr0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Load the DistilBERT tokenizer to process the <code>tokens</code>:</p> <div class="code-block relative"><pre>>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")</pre></div> <p>Since the input has already been split into words, set <code>is_split_into_words=True</code> to tokenize the words into subwords:</p>
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Adding the special tokens <code>[CLS]</code> and <code>[SEP]</code> and subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may be split into two subwords. 
You will need to realign the tokens and labels by:</p> <ol><li>Mapping all tokens to their corresponding word with the <a href="https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids" rel="nofollow"><code>word_ids</code></a> method, as illustrated below.</li> <li>Assigning the label <code>-100</code> to the special tokens <code>[CLS]</code> and <code>[SEP]</code> so the PyTorch loss function ignores them.</li> <li>Only labeling the first token of a given word. Assign <code>-100</code> to other subtokens from the same word.</li></ol>
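<p>To see the mapping this realignment relies on, you can inspect the word ids of the tokenized example from above. <code>None</code> marks a special token, and a repeated id marks subwords that belong to the same original word:</p> <div class="code-block relative"><pre>>>> word_ids = tokenized_input.word_ids()
>>> word_ids[:6]  # [CLS], then the three subwords of '@paulwalk', then 'It' and the start of "'s"
[None, 0, 0, 0, 1, 2]</pre></div>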
</span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... </span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenized_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to tokenize and align the labels over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorForTokenClassification">DataCollatorForTokenClassification</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 
5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Load DistilBERT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForTokenClassification">AutoModelForTokenClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">13</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>
along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForTokenClassification">TFAutoModelForTokenClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">13</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="tcxlb0"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="tcxlb0"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
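<p>As an optional sanity check after training with the <code>Trainer</code> above, you could save the fine-tuned model and run a quick prediction through the <code>pipeline</code> API. This snippet is a minimal sketch and not part of the original guide: the directory name <code>my_wnut_model</code> is an arbitrary placeholder, and since the model was loaded without an <code>id2label</code> mapping, the pipeline will report generic <code>LABEL_k</code> names rather than the WNUT entity types.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; trainer.save_model(&quot;my_wnut_model&quot;)  <span class="hljs-comment"># save the fine-tuned weights to a local directory (placeholder name)</span>
&gt;&gt;&gt; from transformers import pipeline
&gt;&gt;&gt; token_classifier = pipeline(&quot;token-classification&quot;, model=&quot;my_wnut_model&quot;, tokenizer=tokenizer)
&gt;&gt;&gt; token_classifier(&quot;Hugging Face is based in New York City.&quot;)<!-- HTML_TAG_END --></pre></div>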
67
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/multiple_choice.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;multiple-choice&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-swag-dataset&quot;,&quot;title&quot;:&quot;Load SWAG dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Multiple choice&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/multiple_choice.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="multiple-choice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multiple-choice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multiple choice </span></h1> <p>A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. 
The model is trained to select the correct answer from multiple inputs given a context.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a> on the <code>regular</code> configuration of the <a href="https://huggingface.co/datasets/swag" rel="nofollow">SWAG</a> dataset to select the best answer given multiple options and some context.</p> <h2 class="relative group"><a id="load-swag-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-swag-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load SWAG dataset </span></h2> <p>Load the SWAG dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>swag = load_dataset(<span class="hljs-string">&quot;swag&quot;</span>, <span class="hljs-string">&quot;regular&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>swag[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;ending0&#x27;</span>: <span class="hljs-string">&#x27;passes by walking down the street playing their instruments.&#x27;</span>, <span class="hljs-string">&#x27;ending1&#x27;</span>: <span class="hljs-string">&#x27;has heard approaching them.&#x27;</span>, <span class="hljs-string">&#x27;ending2&#x27;</span>: <span class="hljs-string">&quot;arrives and they&#x27;re outside dancing and asleep.&quot;</span>, <span class="hljs-string">&#x27;ending3&#x27;</span>: <span class="hljs-string">&#x27;turns the lead singer watches the performance.&#x27;</span>, <span class="hljs-string">&#x27;fold-ind&#x27;</span>: <span class="hljs-string">&#x27;3416&#x27;</span>, <span class="hljs-string">&#x27;gold-source&#x27;</span>: <span class="hljs-string">&#x27;gold&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;sent1&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments.&#x27;</span>, <span class="hljs-string">&#x27;sent2&#x27;</span>: <span class="hljs-string">&#x27;A drum line&#x27;</span>, <span class="hljs-string">&#x27;startphrase&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments. A drum line&#x27;</span>, <span class="hljs-string">&#x27;video-id&#x27;</span>: <span class="hljs-string">&#x27;anetv_jkn6uvmqwh4&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>sent1</code> and <code>sent2</code> fields show how a sentence begins, and each <code>ending</code> field shows how a sentence could end. 
Given the sentence beginning, the model must pick the correct sentence ending as indicated by the <code>label</code> field.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the BERT tokenizer to process the start of each sentence and the four possible endings:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to do:</p> <ol><li>Make four copies of the <code>sent1</code> field so you can combine each of them with <code>sent2</code> to recreate how a sentence starts.</li> <li>Combine <code>sent2</code> with each of the four possible sentence endings.</li> <li>Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding <code>input_ids</code>, <code>attention_mask</code>, and <code>labels</code> field.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " 
title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>ending_names = [<span class="hljs-string">&quot;ending0&quot;</span>, <span class="hljs-string">&quot;ending1&quot;</span>, <span class="hljs-string">&quot;ending2&quot;</span>, <span class="hljs-string">&quot;ending3&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> first_sentences = [[context] * <span class="hljs-number">4</span> <span class="hljs-keyword">for</span> context <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;sent1&quot;</span>]] <span class="hljs-meta">... </span> question_headers = examples[<span class="hljs-string">&quot;sent2&quot;</span>] <span class="hljs-meta">... </span> second_sentences = [ <span class="hljs-meta">... </span> [<span class="hljs-string">f&quot;<span class="hljs-subst">{header}</span> <span class="hljs-subst">{examples[end][i]}</span>&quot;</span> <span class="hljs-keyword">for</span> end <span class="hljs-keyword">in</span> ending_names] <span class="hljs-keyword">for</span> i, header <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(question_headers) <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> first_sentences = <span class="hljs-built_in">sum</span>(first_sentences, []) <span class="hljs-meta">... </span> second_sentences = <span class="hljs-built_in">sum</span>(second_sentences, []) <span class="hljs-meta">... </span> tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> {k: [v[i : i + <span class="hljs-number">4</span>] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, <span class="hljs-built_in">len</span>(v), <span class="hljs-number">4</span>)] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> tokenized_examples.items()}<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map" rel="nofollow">map</a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenized_swag = swag.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers doesn’t have a data collator for multiple choice, so you will need to create one. You can adapt the <a href="/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples for multiple choice. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <p><code>DataCollatorForMultipleChoice</code> will flatten all the model inputs, apply padding, and then unflatten the results:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute 
bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. <span class="hljs-meta">... </span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... </span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... </span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
...         flattened_features = sum(flattened_features, [])
...         batch = self.tokenizer.pad(
...             flattened_features,
...             padding=self.padding,
...             max_length=self.max_length,
...             pad_to_multiple_of=self.pad_to_multiple_of,
...             return_tensors="pt",
...         )
...         batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
...         batch["labels"] = torch.tensor(labels, dtype=torch.int64)
...         return batch

**TensorFlow**

```py
>>> from dataclasses import dataclass
>>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
>>> from typing import Optional, Union
>>> import tensorflow as tf


>>> @dataclass
... class DataCollatorForMultipleChoice:
...     """
...     Data collator that will dynamically pad the inputs for multiple choice received.
...     """
...
...     tokenizer: PreTrainedTokenizerBase
...     padding: Union[bool, str, PaddingStrategy] = True
...     max_length: Optional[int] = None
...     pad_to_multiple_of: Optional[int] = None
...
...     def __call__(self, features):
...         label_name = "label" if "label" in features[0].keys() else "labels"
...         labels = [feature.pop(label_name) for feature in features]
...         batch_size = len(features)
...         num_choices = len(features[0]["input_ids"])
...         flattened_features = [
...             [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
...         ]
...         flattened_features = sum(flattened_features, [])
...
...         batch = self.tokenizer.pad(
...             flattened_features,
...             padding=self.padding,
...             max_length=self.max_length,
...             pad_to_multiple_of=self.pad_to_multiple_of,
...             return_tensors="tf",
...         )
...
...         batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
...         batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
...         return batch
```
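The collator only needs the tokenizer; the other fields just control padding behaviour. As a quick sanity check, here is a minimal sketch (not part of the original guide; the prompts, candidate endings, and labels are made up) that builds two multiple choice features by hand and inspects the shapes the TensorFlow collator returns:

```py
# Sketch: exercise the TensorFlow DataCollatorForMultipleChoice defined above on two
# hand-built examples to see the (batch_size, num_choices, seq_len) shapes it produces.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)

prompts = ["The chef picked up the knife and", "The dog saw the ball and"]
candidates = [["chopped the onions.", "flew away."], ["chased it.", "solved the equation."]]

features = []
for prompt, endings in zip(prompts, candidates):
    # one (prompt, ending) pair per candidate; each value becomes a list over the choices
    encoded = tokenizer([prompt] * len(endings), endings)
    features.append({**encoded, "label": 0})

batch = collator(features)
print({k: v.shape for k, v in batch.items()})
# input_ids, attention_mask, etc. come back as (2, 2, seq_len); labels as (2,)
```

The PyTorch collator above behaves the same way, just returning `torch` tensors instead.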
href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForMultipleChoice">AutoModelForMultipleChoice</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMultipleChoice, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial <a href="../training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 
```py
>>> training_args = TrainingArguments(
...     output_dir="./results",
...     evaluation_strategy="epoch",
...     learning_rate=5e-5,
...     per_device_train_batch_size=16,
...     per_device_eval_batch_size=16,
...     num_train_epochs=3,
...     weight_decay=0.01,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=tokenized_swag["train"],
...     eval_dataset=tokenized_swag["validation"],
...     tokenizer=tokenizer,
...     data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
... )

>>> trainer.train()
```
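Once training finishes, the fine-tuned model can score candidate endings for a prompt. The snippet below is a sketch rather than part of the guide: the prompt and candidates are invented, and `tokenizer`/`model` refer to the objects created above.

```py
# Sketch: pick the more plausible of two hypothetical endings with the fine-tuned model.
import torch

prompt = "The student opened the laptop and"
candidates = ["started typing the essay.", "the ocean boiled."]

# encode each (prompt, candidate) pair, then add the num_choices dimension: (1, 2, seq_len)
inputs = tokenizer([prompt] * len(candidates), candidates, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0).to(model.device) for k, v in inputs.items()}

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_choices)

print(candidates[logits.argmax(dim=-1).item()])
```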
**TensorFlow**

To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [prepare_tf_dataset()](/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset).

```py
>>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
>>> tf_train_set = model.prepare_tf_dataset(
...     tokenized_swag["train"],
...     shuffle=True,
...     batch_size=batch_size,
...     collate_fn=data_collator,
... )

>>> tf_validation_set = model.prepare_tf_dataset(
...     tokenized_swag["validation"],
...     shuffle=False,
...     batch_size=batch_size,
...     collate_fn=data_collator,
... )
```

> If you aren't familiar with fine-tuning a model with Keras, take a look at the basic tutorial [here](training#finetune-with-keras)!

Set up an optimizer function, learning rate schedule, and some training hyperparameters:

```py
>>> from transformers import create_optimizer

>>> batch_size = 16
>>> num_train_epochs = 2
>>> total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs
>>> optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
```

Load BERT with [TFAutoModelForMultipleChoice](/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForMultipleChoice):
```py
>>> from transformers import TFAutoModelForMultipleChoice

>>> model = TFAutoModelForMultipleChoice.from_pretrained("bert-base-uncased")
```

Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):

```py
>>> model.compile(optimizer=optimizer)
```

Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) to fine-tune the model:
class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <script type="module" data-hydrate="sivwif"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="sivwif"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/multiple_choice.mdx-hf-doc-builder.js") ], params: {} } }); </script>
68
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/tasks/question_answering.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;question-answering&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-squad-dataset&quot;,&quot;title&quot;:&quot;Load SQuAD dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Question answering&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/tasks/question_answering.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="question-answering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#question-answering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Question answering </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/ajPx5LwJD-I" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Question answering tasks return an answer given a question. 
There are two common forms of question answering:</p> <ul><li>Extractive: extract the answer from the given context.</li> <li>Abstractive: generate an answer from the context that correctly answers the question.</li></ul> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/squad" rel="nofollow">SQuAD</a> dataset for extractive question answering.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the question answering <a href="https://huggingface.co/tasks/question-answering" rel="nofollow">task page</a> for more information about other forms of question answering and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-squad-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-squad-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load SQuAD dataset </span></h2> <p>Load the SQuAD dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>squad = load_dataset(<span class="hljs-string">&quot;squad&quot;</span>)<!-- HTML_TAG_END 
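The SQuAD training split is fairly large; if you just want to iterate quickly on the preprocessing and training code, 🤗 Datasets can also load a slice of a split (a sketch, not part of the original guide):

```py
# Sketch: load only the first 5,000 training examples for quicker experimentation.
squad_small = load_dataset("squad", split="train[:5000]")
```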
Then take a look at an example:

```py
>>> squad["train"][0]
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']},
 'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
 'id': '5733be284776f41900661182',
 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?',
 'title': 'University_of_Notre_Dame'
}
```

The `answers` field is a dictionary containing the starting position of the answer and the `text` of the answer.
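In other words, `answer_start` is a character offset into `context`. A quick check that the two fields line up (a sketch, not from the guide):

```py
# Sketch: the answer text can be recovered by slicing the context at answer_start.
example = squad["train"][0]
start = example["answers"]["answer_start"][0]
text = example["answers"]["text"][0]
assert example["context"][start : start + len(text)] == text
```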
## Preprocess

Video: https://www.youtube-nocookie.com/embed/qgaM0weJHpA

Load the DistilBERT tokenizer to process the `question` and `context` fields:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
```

There are a few preprocessing steps particular to question answering that you should be aware of:

1. Some examples in a dataset may have a very long `context` that exceeds the maximum input length of the model. Truncate only the `context` by setting `truncation="only_second"`.
2. Next, map the start and end positions of the answer to the original `context` by setting `return_offset_mapping=True`.
3. With the mapping in hand, you can find the start and end tokens of the answer. Use the [`sequence_ids`](https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids) method to find which part of the offset corresponds to the `question` and which corresponds to the `context`.

Here is how you can create a function to truncate and map the start and end tokens of the answer to the `context`:

```py
>>> def preprocess_function(examples):
...     questions = [q.strip() for q in examples["question"]]
...     inputs = tokenizer(
...         questions,
...         examples["context"],
...         max_length=384,
...         truncation="only_second",
...         return_offsets_mapping=True,
...         padding="max_length",
...     )
...
...     offset_mapping = inputs.pop("offset_mapping")
...     answers = examples["answers"]
...     start_positions = []
...     end_positions = []
...
...     for i, offset in enumerate(offset_mapping):
...         answer = answers[i]
...         start_char = answer["answer_start"][0]
...         end_char = answer["answer_start"][0] + len(answer["text"][0])
...         sequence_ids = inputs.sequence_ids(i)
...
...         # Find the start and end of the context
...         idx = 0
...         while sequence_ids[idx] != 1:
...             idx += 1
...         context_start = idx
...         while sequence_ids[idx] == 1:
...             idx += 1
...         context_end = idx - 1
...
...         # If the answer is not fully inside the context, label it (0, 0)
...         if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
...             start_positions.append(0)
...             end_positions.append(0)
...         else:
...             # Otherwise it's the start and end token positions
...             idx = context_start
...             while idx <= context_end and offset[idx][0] <= start_char:
...                 idx += 1
...             start_positions.append(idx - 1)
...             idx = context_end
...             while idx >= context_start and offset[idx][1] >= end_char:
...                 idx -= 1
...             end_positions.append(idx + 1)
...
...     inputs["start_positions"] = start_positions
...     inputs["end_positions"] = end_positions
...     return inputs
```

Use 🤗 Datasets [map](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map) function to apply the preprocessing function over the entire dataset. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once. Remove the columns you don't need:

```py
>>> tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)
```
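To convince yourself the labels are correct, you can run `preprocess_function` on a couple of raw examples and decode the token span it marks (a sketch, not part of the original guide; expect minor tokenization artifacts such as lowercasing):

```py
# Sketch: verify that the labeled start/end tokens decode back to the answer text.
sample = squad["train"][:2]
encoded = preprocess_function(sample)
for i in range(2):
    s, e = encoded["start_positions"][i], encoded["end_positions"][i]
    print(tokenizer.decode(encoded["input_ids"][i][s : e + 1]))
```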
Use [DefaultDataCollator](/docs/transformers/pr_19429/en/main_classes/data_collator#transformers.DefaultDataCollator) to create a batch of examples. Unlike other data collators in 🤗 Transformers, the `DefaultDataCollator` does not apply additional preprocessing such as padding.

**PyTorch**
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 
Load DistilBERT with [AutoModelForQuestionAnswering](/docs/transformers/pr_19429/en/model_doc/auto#transformers.AutoModelForQuestionAnswering):

```py
>>> from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer

>>> model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")
```

> If you aren't familiar with fine-tuning a model with the [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer), take a look at the basic tutorial [here](../training#finetune-with-trainer)!

At this point, only three steps remain:

1. Define your training hyperparameters in [TrainingArguments](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.TrainingArguments).
2. Pass the training arguments to [Trainer](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer) along with the model, dataset, tokenizer, and data collator.
3. Call [train()](/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer.train) to fine-tune your model.
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>To fine-tune a model in TensorFlow, start by converting your datasets to the <code>tf.data.Dataset</code> format with <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset">prepare_tf_dataset()</a>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
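<p>Not part of the original guide: once <code>trainer.train()</code> finishes, you could, for instance, run an evaluation pass and save the checkpoint for later use (the output directory name below is just an illustration):</p>

<pre>
>>> metrics = trainer.evaluate()            # loss-level metrics on tokenized_squad["validation"]
>>> trainer.save_model("./qa-finetuned")    # hypothetical output directory
>>> tokenizer.save_pretrained("./qa-finetuned")
</pre>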
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = model.prepare_tf_dataset( <span class="hljs-meta">... </span> tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = 
<span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... </span> num_train_steps=total_train_steps, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_19429/en/model_doc/auto#transformers.TFAutoModelForQuestionAnswering">TFAutoModelForQuestionAnswering</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black 
border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="hs7szg"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="hs7szg"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/tasks/question_answering.mdx-hf-doc-builder.js") ], params: {} } }); </script>
69
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/trainer_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-trainer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.EvalPrediction&quot;,&quot;title&quot;:&quot;Utilities&quot;},{&quot;local&quot;:&quot;transformers.trainer_callback.CallbackHandler&quot;,&quot;title&quot;:&quot;Callbacks internals&quot;},{&quot;local&quot;:&quot;transformers.trainer_pt_utils.DistributedTensorGatherer&quot;,&quot;title&quot;:&quot;Distributed Evaluation&quot;},{&quot;local&quot;:&quot;transformers.HfArgumentParser&quot;,&quot;title&quot;:&quot;Distributed Evaluation&quot;},{&quot;local&quot;:&quot;transformers.debug_utils.DebugUnderflowOverflow&quot;,&quot;title&quot;:&quot;Debug Utilities&quot;}],&quot;title&quot;:&quot;Utilities for Trainer&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/trainer_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="utilities-for-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for Trainer </span></h1> <p>This page lists all the utility functions used by <a href="/docs/transformers/pr_19429/en/main_classes/trainer#transformers.Trainer">Trainer</a>.</p> <p>Most of those are only useful if you are studying the code of the Trainer in the library.</p> <h2 class="relative group"><a id="transformers.EvalPrediction" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.EvalPrediction"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.EvalPrediction"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">EvalPrediction</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.EvalPrediction" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.EvalPrediction"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L100" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predictions<span class="opacity-60">: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_ids<span class="opacity-60">: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray], NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.EvalPrediction.predictions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EvalPrediction.predictions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predictions</strong> (<code>np.ndarray</code>) &#x2014; Predictions of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.EvalPrediction.label_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EvalPrediction.label_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_ids</strong> (<code>np.ndarray</code>) &#x2014; Targets 
to be matched.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.EvalPrediction.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EvalPrediction.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>np.ndarray</code>, <em>optional</em>) &#x2014;<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Evaluation output (always contains labels), to be used to compute metrics.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.IntervalStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">IntervalStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.IntervalStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.IntervalStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
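<p>Not in the generated reference: a minimal sketch of a <code>compute_metrics</code> function consuming an <code>EvalPrediction</code>. The accuracy metric is only an illustration; for question answering you would normally post-process start/end logits instead:</p>

<pre>
import numpy as np
from transformers import EvalPrediction

def compute_metrics(eval_pred: EvalPrediction):
    # eval_pred.predictions and eval_pred.label_ids are numpy arrays (or tuples of arrays)
    predictions = np.argmax(eval_pred.predictions, axis=-1)
    accuracy = float((predictions == eval_pred.label_ids).mean())
    return {"accuracy": accuracy}
</pre>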
<h3>class transformers.IntervalStrategy</h3>

<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L174">source</a></p>

<p><code>( value, names = None, module = None, qualname = None, type = None, start = 1 )</code></p>

<p>An enumeration.</p>
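<p>The generated reference only says "An enumeration."; for orientation, the members are the strings accepted by arguments such as <code>evaluation_strategy</code> (worth re-checking against the source link above for your version):</p>

<pre>
>>> from transformers import IntervalStrategy

>>> [s.value for s in IntervalStrategy]
['no', 'steps', 'epoch']
</pre>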
id="transformers.enable_full_determinism" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.enable_full_determinism"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L58" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Helper function for reproducible behavior during distributed training. See</p> <ul><li><a href="https://pytorch.org/docs/stable/notes/randomness.html" rel="nofollow">https://pytorch.org/docs/stable/notes/randomness.html</a> for pytorch</li> <li><a href="https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism" rel="nofollow">https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism</a> for tensorflow</li></ul></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.set_seed"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
<h4>transformers.set_seed</h4>

<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_utils.py#L83">source</a></p>

<p><code>( seed: int )</code></p>

<p>Parameters</p>
<ul>
<li><strong>seed</strong> (<code>int</code>): The seed to set.</li>
</ul>

<p>Helper function for reproducible behavior to set the seed in <code>random</code>, <code>numpy</code>, <code>torch</code> and/or <code>tf</code> (if installed).</p>
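<p>A minimal usage sketch (seed value is arbitrary):</p>

<pre>
>>> from transformers import set_seed

>>> set_seed(42)  # seeds python's random, numpy, torch and tensorflow when they are installed
</pre>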
id="transformers.torch_distributed_zero_first"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.torch_distributed_zero_first</span></h4><!-- HTML_TAG_END --> <a id="transformers.torch_distributed_zero_first" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.torch_distributed_zero_first"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.torch_distributed_zero_first.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.torch_distributed_zero_first.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>) &#x2014; The rank of the local process.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Decorator to make all processes in distributed training wait for each local_master to do something.</p></div> <h2 class="relative group"><a id="transformers.trainer_callback.CallbackHandler" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_callback.CallbackHandler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Callbacks internals </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_callback.CallbackHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.trainer_callback.</span><span class="font-semibold">CallbackHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.trainer_callback.CallbackHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_callback.CallbackHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_callback.py#L290" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callbacks<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Internal class that just calls the list of callbacks in order.</p></div> <h2 class="relative group"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed Evaluation </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.DistributedTensorGatherer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.trainer_pt_utils.</span><span class="font-semibold">DistributedTensorGatherer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.DistributedTensorGatherer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.DistributedTensorGatherer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">world_size<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_samples<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">make_multiple_of<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding_index<span class="opacity-60"> = -100</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.world_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.world_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>world_size</strong> (<code>int</code>) &#x2014; The number of processes used in the distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_samples</strong> (<code>int</code>) &#x2014; The number of samples in our dataset.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>make_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_index</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The padding index to use if the arrays don&#x2019;t all have the same sequence length.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.</p> <p>If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:</p> <p><code>[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]</code></p> <p>to get something of size a multiple of 3 (so that each process gets the same dataset length). 
<p>Then processes 0, 1 and 2 will be responsible for making predictions for the following samples:</p>
<ul>
<li>P0: <code>[0, 1, 2, 3, 4, 5]</code></li>
<li>P1: <code>[6, 7, 8, 9, 10, 11]</code></li>
<li>P2: <code>[12, 13, 14, 15, 0, 1]</code></li>
</ul>
<p>The first batch treated on each process will be:</p>
<ul>
<li>P0: <code>[0, 1]</code></li>
<li>P1: <code>[6, 7]</code></li>
<li>P2: <code>[12, 13]</code></li>
</ul>
<p>So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensors) corresponding to the following indices:</p>
<p><code>[0, 1, 6, 7, 12, 13]</code></p>
<p>If we directly concatenate our results without taking any precautions, the user will get the predictions in this order at the end of the prediction loop:</p>
<p><code>[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]</code></p>
<p>That scrambled, padded order is not what the user expects; this class is there to solve that problem.</p>
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L404" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">arrays<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Add <code>arrays</code> to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we’re bound to get an OOM, it happens at the beginning.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.DistributedTensorGatherer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.DistributedTensorGatherer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/trainer_pt_utils.py#L440" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).</p></div></div> <h2 class="relative group"><a id="transformers.HfArgumentParser" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed Evaluation </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">HfArgumentParser</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L46" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataclass_types<span class="opacity-60">: typing.Union[DataClassType, typing.Iterable[DataClassType]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This subclass of <code>argparse.ArgumentParser</code> uses type hints on dataclasses to generate arguments.</p> <p>The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you’ll get the output back after parsing as an additional namespace. 
Optional: To create sub argument groups use the <code>_argument_group_name</code> attribute in the dataclass.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_args_into_dataclasses"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>parse_args_into_dataclasses</span></h4><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser.parse_args_into_dataclasses" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_args_into_dataclasses"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L180" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_remaining_strings<span class="opacity-60"> = False</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">look_for_args_file<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_filename<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple consisting of</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.HfArgumentParser.parse_args_into_dataclasses.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple consisting of</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li>the dataclass instances in the same order as they were passed to the initializer.abspath</li> <li>if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser after initialization.</li> <li>The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Parse command-line args into instances of the specified dataclass types.</p> <p>This relies on argparse’s <code>ArgumentParser.parse_known_args</code>. See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>parse_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser.parse_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_dict"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L239" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">allow_extra_keys<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple consisting of</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HfArgumentParser.parse_dict.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser.parse_dict.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>dict</code>) &#x2014; dict containing config values<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HfArgumentParser.parse_dict.allow_extra_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser.parse_dict.allow_extra_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.HfArgumentParser.parse_dict.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple consisting of</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li>the dataclass instances in the same order as they were passed to the initializer.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Alternative helper method that does not use <code>argparse</code> at all, instead uses a dict and populating the dataclass types.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>parse_json_file</span></h4><!-- HTML_TAG_END --> <a 
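<p>A small sketch, reusing the parser pattern from the example above; the config values are arbitrary:</p>
<pre>parser = HfArgumentParser(TrainingArguments)
# Returns a 1-tuple because a single dataclass type was passed to the parser.
(training_args,) = parser.parse_dict({"output_dir": "out", "per_device_train_batch_size": 8})</pre>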
id="transformers.HfArgumentParser.parse_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L267" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">allow_extra_keys<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple consisting of</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HfArgumentParser.parse_json_file.json_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser.parse_json_file.json_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; File name of the json file to parse<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span 
class="group flex space-x-1.5 items-start"><a id="transformers.HfArgumentParser.parse_json_file.allow_extra_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser.parse_json_file.allow_extra_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Defaults to False. If False, will raise an exception if the json file contains keys that are not parsed.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.HfArgumentParser.parse_json_file.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple consisting of</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li>the dataclass instances in the same order as they were passed to the initializer.</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Alternative helper method that does not use <code>argparse</code> at all, instead loading a json file and populating the dataclass types.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_yaml_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 
<h4 id="transformers.HfArgumentParser.parse_yaml_file">parse_yaml_file <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/hf_argparser.py#L289" target="_blank">&lt; source &gt;</a></h4>
<p><code>( yaml_file: str, allow_extra_keys: bool = False )</code></p>
<p><strong>Parameters</strong></p>
<ul>
<li><strong>yaml_file</strong> (<code>str</code> or <code>os.PathLike</code>) — File name of the yaml file to parse.</li>
<li><strong>allow_extra_keys</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — If <code>False</code>, will raise an exception if the yaml file contains keys that are not parsed.</li>
</ul>
<p><strong>Returns</strong>: Tuple consisting of</p>
<ul>
<li>the dataclass instances in the same order as they were passed to the initializer.</li>
</ul>
<p>Alternative helper method that does not use <code>argparse</code> at all, instead loading a yaml file and populating the dataclass types.</p>
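<p>And the yaml equivalent (<code>args.yaml</code> is again a hypothetical path; the same single-dataclass parser is assumed):</p>
<pre># args.yaml could contain, for instance:
#   output_dir: out
#   learning_rate: 3.0e-5
(training_args,) = parser.parse_yaml_file("args.yaml")</pre>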
mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.debug_utils.DebugUnderflowOverflow"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.debug_utils.</span><span class="font-semibold">DebugUnderflowOverflow</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.debug_utils.DebugUnderflowOverflow" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.debug_utils.DebugUnderflowOverflow"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/debug_utils.py#L27" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_frames_to_save<span class="opacity-60"> = 21</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trace_batch_nums<span class="opacity-60"> = []</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">abort_after_batch_num<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 
relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to debug.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_frames_to_save</strong> (<code>int</code>, <em>optional</em>, defaults to 21) &#x2014; How many frames back to record<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int]," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int],"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trace_batch_nums(<code>List[int]</code>,</strong> <em>optional</em>, defaults to <code>[]</code>) &#x2014; Which batch numbers to trace (turns detection off)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>abort_after_batch_num</strong> (`int&#x201C;, <em>optional</em>) &#x2014; Whether to abort after a certain batch number has finished<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This debug class helps detect and understand where the model starts getting very large or very small, and more importantly <code>nan</code> or <code>inf</code> weight and activation elements.</p> <p>There are 2 working modes:</p> <ol><li>Underflow/overflow detection (default)</li> <li>Specific batch absolute min/max tracing without detection</li></ol> <p>Mode 1: Underflow/overflow detection</p> <div class="relative group rounded-md"><a id="transformers.debug_utils.DebugUnderflowOverflow.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 
8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>To activate the underflow/overflow detection, initialize the object with the model :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model)<!-- HTML_TAG_END --></pre></div></div> <p>then run the training as normal and if <code>nan</code> or <code>inf</code> gets detected in at least one of the weight, input or output elements this module will throw an exception and will print <code>max_frames_to_save</code> frames that lead to this event, each frame reporting</p> <ol><li>the fully qualified module name plus the class name whose <code>forward</code> was run</li> <li>the absolute min and max value of all elements for each module weights, and the inputs and output</li></ol> <p>For example, here is the header and the last few frames in detection report for <code>google/mt5-small</code> run in fp16</p> <div class="relative group rounded-md"><a id="transformers.debug_utils.DebugUnderflowOverflow.example-2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.example-2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>mixed precision :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 
<pre>
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min  abs max  metadata
[...]
                  encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
                  encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
                  encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf      output
</pre>
<p>You can see here that <code>T5DenseGatedGeluDense.forward</code> resulted in output activations whose absolute max value was around 62.7K, which is very close to fp16&#8217;s top limit of 64K. In the next frame we have <code>Dropout</code>, which renormalizes the weights after it has zeroed some of the elements; this pushes the absolute max value to more than 64K, and we get an overflow.</p>
<p>As you can see, it is the previous frames that we need to look into when the numbers start getting very large for fp16.</p>
<p>The tracking is done in a forward hook, which gets invoked immediately after <code>forward</code> has completed.</p>
<p>By default the last 21 frames are printed. You can change the default to adjust for your needs.</p>
<p>For example:</p>
<pre>debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)</pre>
<p>To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one or a few batches, as explained in the next section.</p>
<p>Mode 2: Specific batch absolute min/max tracing without detection</p>
<p>The second working mode is per-batch tracing with the underflow/overflow detection feature turned off.</p>
<p>Let&#8217;s say you want to watch the absolute min and max values for all the ingredients of each <code>forward</code> call of a given batch, and only do that for batches 1 and 3.</p>
<p>Then you instantiate this class as:</p>
<pre>debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])</pre>
<p>And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.</p>
<p>This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.</p>
<p>Early stopping:</p>
<p>You can also specify the batch number after which to stop the training, with:</p>
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], abort_after_batch_num=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div></div> <p>This feature is mainly useful in the tracing mode, but you can use it for any mode.</p> <p><strong>Performance</strong>:</p> <p>As this module measures absolute <code>min</code>/`<code>max</code> of each weight of the model on every forward it’ll slow the training down. Therefore remember to turn it off once the debugging needs have been met.</p></div> <script type="module" data-hydrate="nrbhxo"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="nrbhxo"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/internal/trainer_utils.mdx-hf-doc-builder.js") ], params: {} } }); </script>
70
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/modeling_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;custom-layers-and-utilities&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.Conv1D&quot;,&quot;title&quot;:&quot;Pytorch custom modules&quot;},{&quot;local&quot;:&quot;transformers.apply_chunking_to_forward&quot;,&quot;title&quot;:&quot;PyTorch Helper Functions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFConv1D&quot;,&quot;title&quot;:&quot;TensorFlow custom layers&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFCausalLanguageModelingLoss&quot;,&quot;title&quot;:&quot;TensorFlow loss functions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.get_initializer&quot;,&quot;title&quot;:&quot;TensorFlow Helper Functions&quot;}],&quot;title&quot;:&quot;Custom Layers and Utilities&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/modeling_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="custom-layers-and-utilities" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#custom-layers-and-utilities"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Custom Layers and Utilities </span></h1> <p>This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling.</p> <p>Most of those are only useful if you are studying the code of the models in the library.</p> <h2 class="relative group"><a id="transformers.Conv1D" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
<h2>PyTorch custom modules</h2>
<h3>class transformers.Conv1D</h3>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L91">&lt; source &gt;</a></p>
<p><code>( nf, nx )</code></p>
<p><strong>Parameters</strong></p>
<ul>
<li><strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.</li>
<li><strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.</li>
</ul>
<p>1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).</p>
<p>Basically works like a linear layer but the weights are transposed.</p>
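<p>To make the "works like a linear layer with transposed weights" remark concrete, here is a small sketch. The shapes and the comparison against <code>torch.nn.Linear</code> are illustrative assumptions, not part of the class contract:</p>
<pre>import torch
from transformers import Conv1D

nf, nx = 12, 4                      # number of output / input features
conv = Conv1D(nf, nx)               # stores weight with shape (nx, nf) and bias with shape (nf,)

x = torch.randn(2, 7, nx)           # (batch_size, seq_len, nx)
out = conv(x)                       # (batch_size, seq_len, nf)

# an equivalent nn.Linear keeps the weight transposed, i.e. with shape (nf, nx)
linear = torch.nn.Linear(nx, nf)
with torch.no_grad():
    linear.weight.copy_(conv.weight.t())
    linear.bias.copy_(conv.bias)
assert torch.allclose(out, linear(x), atol=1e-5)
</pre>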
<h3>class transformers.modeling_utils.PoolerStartLogits</h3>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2713">&lt; source &gt;</a></p>
<p><code>( config: PretrainedConfig )</code></p>
<p><strong>Parameters</strong></p>
<ul>
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerStartLogits.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute SQuAD start logits from sequence hidden states.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerStartLogits.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerStartLogits.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerStartLogits.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
<h4>forward</h4>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2726">&lt; source &gt;</a></p>
<p><code>( hidden_states: FloatTensor, p_mask: typing.Optional[torch.FloatTensor] = None ) &#x2192; torch.FloatTensor</code></p>
<p><strong>Parameters</strong></p>
<ul>
<li><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.</li>
href="#transformers.modeling_utils.PoolerStartLogits.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.PoolerStartLogits.forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The start logits for SQuAD.</p> <!-- HTML_TAG_END --></p> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerEndLogits"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">PoolerEndLogits</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerEndLogits" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerEndLogits"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
<h3>class transformers.modeling_utils.PoolerEndLogits</h3>
<p><a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2751">&lt; source &gt;</a></p>
<p><code>( config: PretrainedConfig )</code></p>
<p><strong>Parameters</strong></p>
<ul>
<li><strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model; it will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.</li>
</ul>
<p>Compute SQuAD end logits from sequence hidden states.</p>
-mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerEndLogits.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerEndLogits.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2768" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">p_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.start_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.start_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.start_positions"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.p_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.PoolerEndLogits.forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The end logits for SQuAD.</p> <!-- HTML_TAG_END --></p> </div></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>One of <code>start_states</code> or <code>start_positions</code> should be not <code>None</code>. 
<p><em>One of <code>start_states</code> or <code>start_positions</code> should not be <code>None</code>. If both are set, <code>start_positions</code> overrides <code>start_states</code>.</em></p>
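<p>A sketch of the two ways to provide the start information is shown below. All shapes, indices and config values are made up for illustration; the head reads only <code>hidden_size</code> and <code>layer_norm_eps</code> from the config:</p>
<pre>import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import PoolerEndLogits

config = PretrainedConfig(hidden_size=16, layer_norm_eps=1e-12)
head = PoolerEndLogits(config)

hidden_states = torch.randn(2, 10, 16)                  # (batch_size, seq_len, hidden_size)

# option 1: gold start positions, e.g. during training
start_positions = torch.tensor([3, 7])                  # (batch_size,)
end_logits = head(hidden_states, start_positions=start_positions)   # (batch_size, seq_len)

# option 2: precomputed start states, e.g. when a start index was already picked at inference
idx = torch.tensor([3, 7]).view(2, 1, 1).expand(-1, 1, 16)
start_states = hidden_states.gather(-2, idx).expand(-1, 10, -1)     # (batch_size, seq_len, hidden_size)
end_logits = head(hidden_states, start_states=start_states)         # (batch_size, seq_len)
</pre>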
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute SQuAD 2.0 answer class from classification and start tokens hidden states.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerAnswerClass.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerAnswerClass.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerAnswerClass.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2835" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; 
The final hidden states of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.start_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.start_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. If <code>None</code>, takes the last token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.PoolerAnswerClass.forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The SQuAD 2.0 answer class.</p> <!-- HTML_TAG_END --></p> </div></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>One of <code>start_states</code> or <code>start_positions</code> should be not <code>None</code>. If both are set, <code>start_positions</code> overrides <code>start_states</code>.</p></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SquadHeadOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SquadHeadOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SquadHeadOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SquadHeadOutput"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2886" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_top_log_probs<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_top_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_top_log_probs<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_top_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_logits<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.start_top_log_probs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.start_top_log_probs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.start_top_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.start_top_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.end_top_log_probs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.end_top_log_probs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.end_top_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.end_top_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.cls_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.cls_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the answers.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models using a <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SQuADHead">SQuADHead</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SQuADHead"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SQuADHead</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SQuADHead" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SQuADHead"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2916" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A SQuAD head inspired by XLNet.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SQuADHead.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SQuADHead.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SQuADHead.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L2935" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_impossible<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">p_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; Final hidden states of the model on the sequence tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the last token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. 
If <code>None</code>, takes the last token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.is_impossible" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.is_impossible"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Whether the question has a possible answer in the paragraph or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.p_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.SQuADHead.forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or <code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.configuration_utils.PretrainedConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) — Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</li> <li><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Indices for the top config.start_n_top start 
token possibilities (beam-search).</li> <li><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the <code>is_impossible</code> label of the answers.</li> </ul> <!-- HTML_TAG_END --></p> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SequenceSummary"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SequenceSummary</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SequenceSummary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SequenceSummary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L3033" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SequenceSummary.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. 
Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute a single vector summary of a sequence hidden states.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SequenceSummary.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SequenceSummary.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SequenceSummary.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_utils.py#L3088" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SequenceSummary.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>[batch_size, seq_len, hidden_size]</code>) &#x2014; The hidden states of the last layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SequenceSummary.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>[batch_size]</code> or <code>[batch_size, ...]</code> where &#x2026; are optional leading dimensions of <code>hidden_states</code>, <em>optional</em>) &#x2014; Used if <code>summary_type == &quot;cls_index&quot;</code> and takes the last token of the sequence as classification token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_utils.SequenceSummary.forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The summary of the sequence hidden states.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Compute a single vector summary of a sequence hidden states.</p></div></div> <h2 class="relative group"><a id="transformers.apply_chunking_to_forward" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PyTorch Helper Functions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.apply_chunking_to_forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" 
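For illustration, here is a minimal sketch of using `SequenceSummary` on its own, assuming a bare `PretrainedConfig` that only carries the `summary_*` fields listed above (in real models these fields come from the model's own config class, e.g. GPT-2 or XLNet); the tensor sizes are made up for the example.

```python
import torch
from transformers import PretrainedConfig
from transformers.modeling_utils import SequenceSummary

# Hypothetical config: only the fields read by SequenceSummary are set here
config = PretrainedConfig(
    hidden_size=768,
    num_labels=2,
    summary_type="cls_index",
    summary_use_proj=True,
    summary_proj_to_labels=True,
    summary_activation="tanh",
    summary_first_dropout=0.1,
    summary_last_dropout=0.1,
)
summary = SequenceSummary(config)

hidden_states = torch.randn(4, 16, 768)  # [batch_size, seq_len, hidden_size]
cls_index = torch.full((4,), 15, dtype=torch.long)  # position of the classification token
summary_vector = summary(hidden_states, cls_index=cls_index)
print(summary_vector.shape)  # torch.Size([4, 2]) since summary_proj_to_labels=True and num_labels=2
```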
height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.apply_chunking_to_forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.apply_chunking_to_forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.apply_chunking_to_forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L174" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forward_fn<span class="opacity-60">: typing.Callable[..., torch.Tensor]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">chunk_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">chunk_dim<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*input_tensors<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.forward_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.forward_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forward_fn</strong> (<code>Callable[..., torch.Tensor]</code>) &#x2014; The forward function of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.chunk_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.chunk_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_size</strong> (<code>int</code>) &#x2014; The chunk size of a chunked tensor: <code>num_chunks = len(input_tensors[0]) / chunk_size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.chunk_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.chunk_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_dim</strong> (<code>int</code>) &#x2014; The dimension over which the <code>input_tensors</code> should be chunked.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.input_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.input_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_tensors</strong> (<code>Tuple[torch.Tensor]</code>) &#x2014; The input tensors of <code>forward_fn</code> which will be chunked<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.apply_chunking_to_forward.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tensor with the same shape as the <code>forward_fn</code> would have given if applied`.</p> <!-- HTML_TAG_END --></p> </div></div> <p>This function chunks the <code>input_tensors</code> into smaller input tensor parts of size <code>chunk_size</code> over the dimension <code>chunk_dim</code>. 
It then applies a layer <code>forward_fn</code> to each chunk independently to save memory.</p> <p>If the <code>forward_fn</code> is independent across the <code>chunk_dim</code> this function will yield the same result as directly applying <code>forward_fn</code> to <code>input_tensors</code>.</p> <div class="relative group rounded-md"><a id="transformers.apply_chunking_to_forward.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># rename the usual forward() fn to forward_chunk()</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward_chunk</span>(<span class="hljs-params">self, hidden_states</span>): hidden_states = self.decoder(hidden_states) <span class="hljs-keyword">return</span> hidden_states <span class="hljs-comment"># implement a chunked forward function</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): <span class="hljs-keyword">return</span> apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 
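To make the equivalence concrete, here is a small self-contained sketch (layer, sizes and chunk size are arbitrary choices for illustration): the toy feed-forward below acts on each sequence position independently, so chunking over the sequence dimension gives the same result as the unchunked call, just with a smaller peak memory footprint.

```python
import torch
from transformers import apply_chunking_to_forward

feed_forward = torch.nn.Linear(8, 8)  # toy position-wise feed-forward


def forward_chunk(hidden_states):
    return feed_forward(hidden_states)


hidden_states = torch.randn(2, 16, 8)  # [batch_size, seq_len, hidden_size]

# chunk the sequence dimension (dim=1) into pieces of 4 positions each
chunked_output = apply_chunking_to_forward(forward_chunk, 4, 1, hidden_states)
full_output = forward_chunk(hidden_states)
print(torch.allclose(chunked_output, full_output))  # True
```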
-ml-4 pt-3 px-2.5" id="transformers.pytorch_utils.find_pruneable_heads_and_indices"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.pytorch_utils.find_pruneable_heads_and_indices</span></h4><!-- HTML_TAG_END --> <a id="transformers.pytorch_utils.find_pruneable_heads_and_indices" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pytorch_utils.find_pruneable_heads_and_indices"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L249" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">heads<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_heads<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">already_pruned_heads<span class="opacity-60">: typing.Set[int]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[Set[int], torch.LongTensor]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.find_pruneable_heads_and_indices.heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.find_pruneable_heads_and_indices.heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>heads</strong> (<code>List[int]</code>) &#x2014; List of the indices of heads to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.find_pruneable_heads_and_indices.n_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.find_pruneable_heads_and_indices.n_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_heads</strong> (<code>int</code>) &#x2014; The number of heads in the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.find_pruneable_heads_and_indices.head_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.find_pruneable_heads_and_indices.head_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_size</strong> (<code>int</code>) &#x2014; The size of each head.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.find_pruneable_heads_and_indices.already_pruned_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.find_pruneable_heads_and_indices.already_pruned_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_pruned_heads</strong> (<code>Set[int]</code>) &#x2014; A set of already pruned heads.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.pytorch_utils.find_pruneable_heads_and_indices.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[Set[int], torch.LongTensor]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tuple with the remaining heads and their corresponding indices.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Finds the heads and their indices taking <code>already_pruned_heads</code> into account.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.prune_layer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 
dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.prune_layer</span></h4><!-- HTML_TAG_END --> <a id="transformers.prune_layer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.prune_layer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L150" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer<span class="opacity-60">: typing.Union[torch.nn.modules.linear.Linear, transformers.pytorch_utils.Conv1D]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">index<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dim<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold 
!mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.prune_layer.layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer</strong> (<code>Union[torch.nn.Linear, Conv1D]</code>) &#x2014; The layer to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.prune_layer.index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.prune_layer.dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 
0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dim</strong> (<code>int</code>, <em>optional</em>) &#x2014; The dimension on which to keep the indices.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.prune_layer.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Prune a Conv1D or linear layer to keep only entries in index.</p> <p>Used to remove heads.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pytorch_utils.prune_conv1d_layer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.pytorch_utils.prune_conv1d_layer</span></h4><!-- HTML_TAG_END --> <a id="transformers.pytorch_utils.prune_conv1d_layer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pytorch_utils.prune_conv1d_layer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
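A minimal sketch of the dispatch behaviour (layer size and indices are arbitrary): `prune_layer` picks the appropriate routine from the layer type, so the same call covers both `torch.nn.Linear` and `Conv1D` layers.

```python
import torch
from transformers import prune_layer

linear = torch.nn.Linear(768, 768)
index = torch.arange(512)  # keep only the first 512 output units

pruned = prune_layer(linear, index)  # for a Linear layer, dim defaults to 0 (the output dimension)
print(pruned.weight.shape)  # torch.Size([512, 768])
print(pruned.bias.shape)    # torch.Size([512])
```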
### transformers.pytorch_utils.prune_conv1d_layer

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pytorch_utils.py#L117)

`( layer: Conv1D, index: LongTensor, dim: int = 1 )` → [Conv1D](/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D)

Parameters:

- **layer** ([Conv1D](/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D)) -- The layer to prune.
- **index** (`torch.LongTensor`) -- The indices to keep in the layer.
- **dim** (`int`, *optional*, defaults to 1) -- The dimension on which to keep the indices.

Returns: [Conv1D](/docs/transformers/pr_19429/en/internal/modeling_utils#transformers.Conv1D) -- The pruned layer as a new layer with `requires_grad=True`.

Prune a Conv1D layer to keep only entries in index. A Conv1D works like a Linear layer (see e.g. BERT) but the weights are transposed.

Used to remove heads.
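Because the `Conv1D` weight is stored transposed with respect to `torch.nn.Linear`, its output features live on axis 1 of the weight, which is why `dim` defaults to 1 here. A short sketch with arbitrary sizes:

```python
import torch
from transformers.pytorch_utils import Conv1D, prune_conv1d_layer

conv = Conv1D(nf=3072, nx=768)  # weight has shape [nx, nf] = [768, 3072]
index = torch.arange(2048)      # keep the first 2048 output features

pruned = prune_conv1d_layer(conv, index, dim=1)
print(pruned.weight.shape)  # torch.Size([768, 2048])
print(pruned.bias.shape)    # torch.Size([2048])
```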
hover:text-white dark:hover:bg-white dark:hover:text-black">dim<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.nn.Linear</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.prune_linear_layer.layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.prune_linear_layer.layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer</strong> (<code>torch.nn.Linear</code>) &#x2014; The layer to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.prune_linear_layer.index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pytorch_utils.prune_linear_layer.index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pytorch_utils.prune_linear_layer.dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.pytorch_utils.prune_linear_layer.dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The dimension on which to keep the indices.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.pytorch_utils.prune_linear_layer.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.nn.Linear</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Prune a linear layer to keep only entries in index.</p> <p>Used to remove heads.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.TFConv1D" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFConv1D"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow custom layers </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFConv1D"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 
## TensorFlow custom layers

### class transformers.modeling_tf_utils.TFConv1D

`( *args, **kwargs )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2759)

Parameters:
- **nf** (`int`) — The number of output features.
- **nx** (`int`) — The number of input features.
- **initializer_range** (`float`, *optional*, defaults to 0.02) — The standard deviation to use to initialize the weights.
- **kwargs** — Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.

1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

Basically works like a linear layer but the weights are transposed.
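A minimal sketch of applying the layer to a dummy tensor; the shapes are made up for the example.

```python
import tensorflow as tf
from transformers.modeling_tf_utils import TFConv1D

# Project 64 input features (nx) to 128 output features (nf).
layer = TFConv1D(nf=128, nx=64, initializer_range=0.02)

hidden = tf.random.normal((2, 10, 64))  # (batch, seq_len, nx)
output = layer(hidden)                  # last dimension becomes nf
print(output.shape)                     # (2, 10, 128)
```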
class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary, e.g., the number of unique tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>) &#x2014; The size of the embedding vectors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>) &#x2014; The standard deviation to use when initializing the weights. If no value is provided, it will default to {@html &quot;<span class="\&quot;katex\&quot;"><span class="\&quot;katex-mathml\&quot;"><math xmlns="\&quot;http://www.w3.org/1998/Math/MathML\&quot;"><semantics><mrow><mn>1</mn><mi mathvariant="\&quot;normal\&quot;">/</mi><msqrt><mrow><mi>h</mi><mi>i</mi><mi>d</mi><mi>d</mi><mi>e</mi><mi>n</mi><mi mathvariant="\&quot;normal\&quot;">_</mi><mi>s</mi><mi>i</mi><mi>z</mi><mi>e</mi></mrow></msqrt></mrow><annotation encoding="\&quot;application/x-tex\&quot;">1/\\sqrt{hidden\\_size}</annotation></semantics></math></span><span class="\&quot;katex-html\&quot;" aria-hidden="\&quot;true\&quot;"><span class="\&quot;base\&quot;"><span class="\&quot;strut\&quot;" style="\&quot;height:1.24em;vertical-align:-0.3628em;\&quot;"></span><span class="\&quot;mord\&quot;">1/</span><span class="\&quot;mord" sqrt\"><span class="\&quot;vlist-t" vlist-t2\"><span class="\&quot;vlist-r\&quot;"><span class="\&quot;vlist\&quot;" style="\&quot;height:0.8772em;\&quot;"><span class="\&quot;svg-align\&quot;" style="\&quot;top:-3.2em;\&quot;"><span class="\&quot;pstrut\&quot;" style="\&quot;height:3.2em;\&quot;"></span><span class="\&quot;mord\&quot;" style="\&quot;padding-left:1em;\&quot;"><span class="\&quot;mord" mathnormal\">hi</span><span class="\&quot;mord" mathnormal\">dd</span><span class="\&quot;mord" mathnormal\">e</span><span class="\&quot;mord" mathnormal\">n</span><span class="\&quot;mord\&quot;" style="\&quot;margin-right:0.02778em;\&quot;">_</span><span class="\&quot;mord" mathnormal\">s</span><span class="\&quot;mord" mathnormal\">i</span><span class="\&quot;mord" mathnormal\">ze</span></span></span><span style="\&quot;top:-2.8372em;\&quot;"><span class="\&quot;pstrut\&quot;" style="\&quot;height:3.2em;\&quot;"></span><span class="\&quot;hide-tail\&quot;" style="\&quot;min-width:1.02em;height:1.28em;\&quot;"><svg xmlns="\&quot;http://www.w3.org/2000/svg\&quot;" width="400em" height="1.28em" viewBox="0 0 400000 1296" preserveAspectRatio="xMinYMin slice"><path d="M263,681c0.7,0,18,39.7,52,119\nc34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120\nc340,-704.7,510.7,-1060.3,512,-1067\nl0 -0\nc4.7,-7.3,11,-11,19,-11\nH40000v40H1012.3\ns-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232\nc-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1\ns-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26\nc-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z\nM1001 80h400000v40h-400000z"/></svg></span></span></span><span class="\&quot;vlist-s\&quot;">&#x200B;</span></span><span class="\&quot;vlist-r\&quot;"><span class="\&quot;vlist\&quot;" style="\&quot;height:0.3628em;\&quot;"><span></span></span></span></span></span></span></span></span>&quot;}. 
kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct shared token embeddings.</p> <p>The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSharedEmbeddings.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFSharedEmbeddings.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSharedEmbeddings.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2845" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mode<span class="opacity-60">: str = &#39;embedding&#39;</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.call.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.call.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>tf.Tensor</code>) &#x2014; In embedding mode, should be an int64 tensor with shape <code>[batch_size, length]</code>.</p> <p>In linear mode, should be a float tensor with shape <code>[batch_size, length, hidden_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.call.mode" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.call.mode"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mode</strong> (<code>str</code>, defaults to <code>&quot;embedding&quot;</code>) &#x2014; A valid value is either <code>&quot;embedding&quot;</code> or <code>&quot;linear&quot;</code>, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear 
decoder.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TFSharedEmbeddings.call.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>In embedding mode, the output is a float32 embedding tensor, with shape <code>[batch_size, length, embedding_size]</code>.</p> <p>In linear mode, the output is a float32 with shape <code>[batch_size, length, vocab_size]</code>.</p> <!-- HTML_TAG_END --></p> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFSharedEmbeddings.call.raises"><p class="text-base">Raises</p> <!-- HTML_TAG_START --> <p><code>ValueError</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li><code>ValueError</code> — if <code>mode</code> is not valid.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Get token embeddings of inputs or decode final hidden state.</p> <p>Shared weights logic is adapted from <a href="https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24" rel="nofollow">here</a>.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSequenceSummary"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFSequenceSummary</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFSequenceSummary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSequenceSummary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 
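A minimal sketch of the two modes described above; the vocabulary size, hidden size, and token ids are made up for the example.

```python
import tensorflow as tf
from transformers import TFSharedEmbeddings

embeddings = TFSharedEmbeddings(vocab_size=1000, hidden_size=64)

# Embedding mode: token ids -> hidden states.
input_ids = tf.constant([[1, 5, 42, 7]], dtype=tf.int64)   # (batch, length)
hidden_states = embeddings(input_ids, mode="embedding")    # (batch, length, hidden_size)

# Linear mode: hidden states -> vocabulary logits, reusing the same weight matrix.
logits = embeddings(hidden_states, mode="linear")          # (batch, length, vocab_size)
```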
### class transformers.TFSequenceSummary

`( *args, **kwargs )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L2898)

Parameters:
- **config** ([PretrainedConfig](/docs/transformers/pr_19429/en/main_classes/configuration#transformers.PretrainedConfig)) — The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):
  - **summary_type** (`str`) — The method to use to make this summary. Accepted values are:
    - `"last"` — Take the last token hidden state (like XLNet)
    - `"first"` — Take the first token hidden state (like Bert)
    - `"mean"` — Take the mean of all tokens hidden states
    - `"cls_index"` — Supply a Tensor of classification token position (GPT/GPT-2)
    - `"attn"` — Not implemented now, use multi-head attention
  - **summary_use_proj** (`bool`) — Add a projection after the vector extraction.
  - **summary_proj_to_labels** (`bool`) — If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`).
  - **summary_activation** (`Optional[str]`) — Set to `"tanh"` to add a tanh activation to the output; any other string or `None` adds no activation.
  - **summary_first_dropout** (`float`) — Optional dropout probability before the projection and activation.
  - **summary_last_dropout** (`float`) — Optional dropout probability after the projection and activation.
- **initializer_range** (`float`, defaults to 0.02) — The standard deviation to use to initialize the weights.
- **kwargs** — Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.

Compute a single vector summary of a sequence's hidden states.
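A minimal sketch of building the summary layer from a config; the config values and dummy hidden states are made up for the example, and it assumes the layer can be called directly on a hidden-states tensor when `summary_type` does not require a `cls_index`.

```python
import tensorflow as tf
from transformers import GPT2Config, TFSequenceSummary

# Any PretrainedConfig exposing the summary_* attributes works; GPT-2's config has them.
config = GPT2Config(summary_type="mean", summary_use_proj=True)
summary = TFSequenceSummary(config, initializer_range=0.02)

hidden_states = tf.random.normal((2, 10, config.hidden_size))  # (batch, seq_len, hidden)
summary_vector = summary(hidden_states)                        # one vector per sequence
```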
## TensorFlow loss functions

### class transformers.modeling_tf_utils.TFCausalLanguageModelingLoss

`( )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L179)

Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.

Note: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
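A minimal sketch of the -100 convention mentioned in the note; this only illustrates how ignored positions can be masked out, it is not the library's internal loss implementation, and the logits and labels are made up for the example.

```python
import tensorflow as tf

logits = tf.random.normal((2, 5, 100))                 # (batch, seq_len, vocab_size)
labels = tf.constant([[3, 7, -100, 2, -100],
                      [1, -100, 4, 4, 9]])             # -100 marks ignored positions

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
active = tf.not_equal(labels, -100)                    # keep only real labels
loss = tf.reduce_mean(
    loss_fn(tf.boolean_mask(labels, active), tf.boolean_mask(logits, active))
)
```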
### class transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss

`( )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L298)

Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.

Note: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L288" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for multiple choice tasks.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFQuestionAnsweringLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFQuestionAnsweringLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFQuestionAnsweringLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFQuestionAnsweringLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L210" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for question 
### class transformers.modeling_tf_utils.TFSequenceClassificationLoss

`( )` — [source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L269)

Loss function suitable for sequence classification.
class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFTokenClassificationLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFTokenClassificationLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFTokenClassificationLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L225" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for token classification.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.get_initializer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.get_initializer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 
0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow Helper Functions </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.get_initializer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_tf_utils.get_initializer</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.get_initializer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.get_initializer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L3014" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">initializer_range<span class="opacity-60">: float = 0.02</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.initializers.TruncatedNormal</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.get_initializer.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.get_initializer.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<em>float</em>, defaults to 0.02) &#x2014; Standard deviation of the initializer range.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.modeling_tf_utils.get_initializer.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.initializers.TruncatedNormal</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The truncated normal initializer.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Creates a <code>tf.initializers.TruncatedNormal</code> with the given range.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.keras_serializable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 
16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_tf_utils.keras_serializable</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.keras_serializable" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.keras_serializable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/modeling_tf_utils.py#L114" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.keras_serializable.cls" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.keras_serializable.cls"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls</strong> (a <code>tf.keras.layers.Layers subclass</code>) &#x2014; Typically a 
<code>TF.MainLayer</code> class in this project, in general must accept a <code>config</code> argument to its initializer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Decorate a Keras Layer class to support Keras serialization.</p> <p>This is done by:</p> <ol><li>Adding a <code>transformers_config</code> dict to the Keras config dictionary in <code>get_config</code> (called by Keras at serialization time).</li> <li>Wrapping <code>__init__</code> to accept that <code>transformers_config</code> dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.</li> <li>Registering the class as a custom object in Keras (if the TensorFlow version supports this), so that it does not need to be supplied in <code>custom_objects</code> in the call to <code>tf.keras.models.load_model</code>.</li></ol></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.shape_list"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.shape_list</span></h4><!-- HTML_TAG_END --> <a id="transformers.shape_list" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.shape_list"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tf_utils.py#L26" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor<span class="opacity-60">: typing.Union[tensorflow.python.framework.ops.Tensor, numpy.ndarray]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.shape_list.tensor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.shape_list.tensor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code>) &#x2014; The tensor we want the shape of.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.shape_list.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The shape of the tensor as a list.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Deal with dynamic shape in tensorflow cleanly.</p></div> <script type="module" data-hydrate="j2upai"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="j2upai"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/internal/modeling_utils.mdx-hf-doc-builder.js") ], params: {} } }); </script>
71
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/file_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;general-utilities&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.utils.ExplicitEnum&quot;,&quot;title&quot;:&quot;Enums and namedtuples&quot;},{&quot;local&quot;:&quot;transformers.add_start_docstrings&quot;,&quot;title&quot;:&quot;Special Decorators&quot;},{&quot;local&quot;:&quot;transformers.utils.cached_property&quot;,&quot;title&quot;:&quot;Special Properties&quot;},{&quot;local&quot;:&quot;transformers._LazyModule&quot;,&quot;title&quot;:&quot;Other Utilities&quot;}],&quot;title&quot;:&quot;General Utilities&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/file_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="general-utilities" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#general-utilities"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>General Utilities </span></h1> <p>This page lists all of Transformers general utility functions that are found in the file <code>utils.py</code>.</p> <p>Most of those are only useful if you are studying the general code in the library.</p> <h2 class="relative group"><a id="transformers.utils.ExplicitEnum" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.ExplicitEnum"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Enums and namedtuples </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.ExplicitEnum"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.utils.</span><span class="font-semibold">ExplicitEnum</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.utils.ExplicitEnum" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.ExplicitEnum"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L244" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enum with more explicit error message for missing values.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.PaddingStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.utils.</span><span class="font-semibold">PaddingStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.utils.PaddingStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.PaddingStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L256" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>padding</code> argument in <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. Useful for tab-completion in an IDE.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TensorType"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TensorType</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TensorType" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TensorType"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L267" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>return_tensors</code> argument in <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. 
Useful for tab-completion in an IDE.</p></div> <h2 class="relative group"><a id="transformers.add_start_docstrings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.add_start_docstrings"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Special Decorators </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.add_start_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.add_start_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.add_start_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.add_start_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L23" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.add_start_docstrings_to_model_forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.add_start_docstrings_to_model_forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.add_start_docstrings_to_model_forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.add_start_docstrings_to_model_forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.add_end_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.add_end_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.add_end_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.add_end_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.add_code_sample_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.add_code_sample_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.add_code_sample_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.add_code_sample_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L1051" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">processor_class<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">checkpoint<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_class<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qa_target_start_index<span class="opacity-60"> = 14</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qa_target_end_index<span class="opacity-60"> = 15</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_cls<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modality<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">expected_output<span class="opacity-60"> = &#39;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">expected_loss<span class="opacity-60"> = &#39;&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.replace_return_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 
7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.replace_return_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.replace_return_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.replace_return_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/doc.py#L1130" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_class<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <h2 class="relative group"><a id="transformers.utils.cached_property" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.cached_property"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Special Properties </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.cached_property"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all 
md:text-lg">class transformers.utils.cached_property</span></h3> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/generic.py#L32" target="_blank">&lt; source &gt;</a> <p>( fget = None, fset = None, fdel = None, doc = None )</p> <p>Descriptor that mimics @property but caches the output in a member variable.</p> <p>From tensorflow_datasets.</p> <p>Built into functools from Python 3.8.</p></div> <h2>Other Utilities</h2> <div class="docstring"> <h3>class transformers._LazyModule</h3> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/import_utils.py#L1014" target="_blank">&lt; source &gt;</a> <p>( name, module_file, import_structure, module_spec = None, extra_objects = None )</p> <p>Module class that surfaces all objects but only performs the associated imports when the objects are requested.</p></div>
72
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/pipelines_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.pipelines.ArgumentHandler&quot;,&quot;title&quot;:&quot;Argument handling&quot;},{&quot;local&quot;:&quot;transformers.PipelineDataFormat&quot;,&quot;title&quot;:&quot;Data format&quot;},{&quot;local&quot;:&quot;transformers.pipelines.PipelineException&quot;,&quot;title&quot;:&quot;Utilities&quot;}],&quot;title&quot;:&quot;Utilities for pipelines&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/pipelines_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="utilities-for-pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for pipelines </span></h1> <p>This page lists all the utility functions the library provides for pipelines.</p> <p>Most of those are only useful if you are studying the code of the models in the library.</p> <h2 class="relative group"><a id="transformers.pipelines.ArgumentHandler" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.ArgumentHandler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Argument handling </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.ArgumentHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">ArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.ArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.ArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L406" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base interface for handling arguments for each <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
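<p>Concrete handlers implement <code>__call__</code> and return the normalized inputs for their pipeline. The sketch below is illustrative only: the handler name and its normalization rules are made up for the example, and it assumes <code>ArgumentHandler</code> is importable from <code>transformers.pipelines</code> as documented above.</p>
<pre><code class="language-python">from transformers.pipelines import ArgumentHandler


class SingleOrListArgumentHandler(ArgumentHandler):
    """Hypothetical handler that normalizes a string or a list of strings into a list."""

    def __call__(self, inputs, **kwargs):
        if isinstance(inputs, str):
            return [inputs]
        if isinstance(inputs, (list, tuple)) and all(isinstance(i, str) for i in inputs):
            return list(inputs)
        raise ValueError("inputs must be a string or a list of strings")


handler = SingleOrListArgumentHandler()
print(handler("a single sentence"))  # ['a single sentence']
</code></pre>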
id="transformers.pipelines.ZeroShotClassificationArgumentHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">ZeroShotClassificationArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.ZeroShotClassificationArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.ZeroShotClassificationArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/zero_shot_classification.py#L13" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.QuestionAnsweringArgumentHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">QuestionAnsweringArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.QuestionAnsweringArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.QuestionAnsweringArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/question_answering.py#L149" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. 
question &amp; context) to be mapped to internal <code>SquadExample</code>.</p> <p>QuestionAnsweringArgumentHandler manages all the possible to create a <code>SquadExample</code> from the command-line supplied arguments.</p></div> <h2 class="relative group"><a id="transformers.PipelineDataFormat" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data format </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
<h2>Data format</h2> <div class="docstring"> <h3>class transformers.PipelineDataFormat</h3> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L416" target="_blank">&lt; source &gt;</a> <p>( output_path: typing.Optional[str], input_path: typing.Optional[str], column: typing.Optional[str], overwrite: bool = False )</p> <p>Parameters</p> <ul><li><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.</li> <li><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.</li> <li><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.</li> <li><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.</li></ul> <p>Base class for all the pipeline-supported data formats, both for reading and writing. Supported data formats currently include:</p> <ul><li>JSON</li> <li>CSV</li> <li>stdin/stdout (pipe)</li></ul> <p><code>PipelineDataFormat</code> also includes some utilities to work with multiple columns, such as mapping dataset columns to pipeline keyword arguments through the <code>dataset_kwarg_1=dataset_column_1</code> format.</p>
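<p>The class methods documented below (<code>from_str</code>, <code>save</code>, <code>save_binary</code>) are usually combined along these lines. This is a minimal sketch, assuming the format names <code>"json"</code>, <code>"csv"</code> and <code>"pipe"</code> map to the supported formats listed above; the file paths and the column mapping are placeholders:</p>
<pre><code class="language-python">from transformers import PipelineDataFormat

# Pick the CSV reader/writer; "input.csv" / "output.csv" are placeholder paths.
fmt = PipelineDataFormat.from_str(
    "csv",
    output_path="output.csv",
    input_path="input.csv",
    column="question=question_col,context=context_col",  # dataset_kwarg=dataset_column mapping
    overwrite=True,
)

# Subclasses are iterable: each row of input.csv is yielded as pipeline inputs...
rows = list(fmt)
# ...and save() writes results back using the same (CSV) representation.
fmt.save([{"answer": "Berlin", "score": 0.98} for _ in rows])
</code></pre>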
<div class="docstring"> <h4>from_str</h4> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L493" target="_blank">&lt; source &gt;</a> <p>( format: str, output_path: typing.Optional[str], input_path: typing.Optional[str], column: typing.Optional[str], overwrite = False ) → <a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a></p> <p>Parameters</p> <ul><li><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.</li> <li><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.</li> <li><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.</li> <li><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.</li></ul> <p>Returns: <a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a> &#x2014; The proper data format.</p> <p>Creates an instance of the right subclass of <a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a> depending on <code>format</code>.</p></div> <div class="docstring"> <h4>save</h4> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L465" target="_blank">&lt; source &gt;</a> <p>( data: typing.Union[dict, typing.List[dict]] )</p> <p>Parameters</p> <ul><li><strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.</li></ul> <p>Save the provided data object with the representation for the current <a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a>.</p></div> <div class="docstring"> <h4>save_binary</h4> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L475" target="_blank">&lt; source &gt;</a> <p>( data: typing.Union[dict, typing.List[dict]] ) → <code>str</code></p> <p>Parameters</p> <ul><li><strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.</li></ul> <p>Returns: <code>str</code> &#x2014; Path where the data has been saved.</p> <p>Save the provided data object as pickle-formatted binary data on disk.</p></div></div>
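<p>The path returned by <code>save_binary</code> points at a pickle file, so it can be read back with the standard library. A small sketch (paths are placeholders, and the exact name of the generated pickle file is an implementation detail):</p>
<pre><code class="language-python">import pickle

from transformers import PipelineDataFormat

fmt = PipelineDataFormat.from_str("json", output_path="out.json", input_path="in.json", column=None)

# save_binary() pickles the data and returns the path of the file it wrote.
binary_path = fmt.save_binary({"label": "POSITIVE", "score": 0.99})

with open(binary_path, "rb") as f:
    restored = pickle.load(f)
print(restored)
</code></pre>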
<div class="docstring"> <h3>class transformers.CsvPipelineDataFormat</h3> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L529" target="_blank">&lt; source &gt;</a> <p>( output_path: typing.Optional[str], input_path: typing.Optional[str], column: typing.Optional[str], overwrite = False )</p> <p>Parameters</p> <ul><li><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.</li> <li><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.</li> <li><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.</li> <li><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.</li></ul> <p>Support for pipelines using CSV data format.</p> <div class="docstring"> <h4>save</h4> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L559" target="_blank">&lt; source &gt;</a> <p>( data: typing.List[dict] )</p> <p>Parameters</p> <ul><li><strong>data</strong> (<code>List[dict]</code>) &#x2014; The data to store.</li></ul> <p>Save the provided data object with the representation for the current <a href="/docs/transformers/pr_19429/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a>.</p></div></div> <div class="docstring"> <h3>class transformers.JsonPipelineDataFormat</h3> <a href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L573" target="_blank">&lt; source &gt;</a> <p>( output_path: typing.Optional[str], input_path: typing.Optional[str], column:
typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.column"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Support for pipelines using JSON file format.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.JsonPipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 
11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.JsonPipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.JsonPipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L604" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.save.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save the provided data object in a 
json file.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipedPipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PipedPipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PipedPipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipedPipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L615" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">overwrite<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Read data from piped input to the python process. 
For multi-column data, columns should be separated by a tab character (<code>\t</code>).</p> <p>If columns are provided, then the output will be a dictionary with {column_x: value_x}.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipedPipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.PipedPipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipedPipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L644" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul 
class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.save.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Print the data.</p></div></div> <h2 class="relative group"><a id="transformers.pipelines.PipelineException" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.PipelineException"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" 
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">PipelineException</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.PipelineException" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.PipelineException"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/pipelines/base.py#L389" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">reason<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipelines.PipelineException.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>) &#x2014; The task of the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipelines.PipelineException.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code>) &#x2014; The model used by the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.pipelines.PipelineException.reason" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.reason"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>reason</strong> (<code>str</code>) &#x2014; The error message to display.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Raised by a <a href="/docs/transformers/pr_19429/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a> when handling <strong>call</strong>.</p></div> <script type="module" data-hydrate="tseymv"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="tseymv"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), 
import("/docs/transformers/pr_19429/en/_app/pages/internal/pipelines_utils.mdx-hf-doc-builder.js") ], params: {} } }); </script>
73
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/generation_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-generation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;generate-outputs&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.generation_utils.GreedySearchDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;GreedySearchOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.SampleDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;SampleOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.BeamSearchDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;BeamSearchOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.BeamSampleDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;BeamSampleOutput&quot;}],&quot;title&quot;:&quot;Generate Outputs&quot;},{&quot;local&quot;:&quot;transformers.LogitsProcessor&quot;,&quot;title&quot;:&quot;LogitsProcessor&quot;},{&quot;local&quot;:&quot;transformers.StoppingCriteria&quot;,&quot;title&quot;:&quot;StoppingCriteria&quot;},{&quot;local&quot;:&quot;transformers.Constraint&quot;,&quot;title&quot;:&quot;Constraints&quot;},{&quot;local&quot;:&quot;transformers.BeamScorer&quot;,&quot;title&quot;:&quot;BeamSearch&quot;},{&quot;local&quot;:&quot;transformers.top_k_top_p_filtering&quot;,&quot;title&quot;:&quot;Utilities&quot;}],&quot;title&quot;:&quot;Utilities for Generation&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/generation_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="utilities-for-generation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-generation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Utilities for Generation </span></h1> <p>This page lists all the utility functions used by <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a>, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a>, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a>, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a>, <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, and <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>.</p> <p>Most of those are only useful if you are studying the code of the generate methods in the library.</p> <h2 class="relative group"><a id="generate-outputs" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#generate-outputs"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Generate Outputs </span></h2> <p>The output of <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> is an instance of a subclass of <a href="/docs/transformers/pr_19429/en/main_classes/output#transformers.utils.ModelOutput">ModelOutput</a>. 
This output is a data structure containing all the information returned by <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, but it can also be used as a tuple or a dictionary.</p> <p>Here’s an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute and &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generation_output = model.generate(**inputs, return_dict_in_generate=<span class="hljs-literal">True</span>, output_scores=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>The <code>generation_output</code> object is a <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput">GreedySearchDecoderOnlyOutput</a>; as we can see in the documentation of that class below, it has the following attributes:</p> <ul><li><code>sequences</code>: the generated sequences of tokens</li> <li><code>scores</code> (optional): the prediction scores of the language modeling head, for each generation step</li> <li><code>hidden_states</code> (optional): the hidden states of the model, for each generation step</li> <li><code>attentions</code> (optional): the attention weights of the model, for each generation step</li></ul> <p>Here we have the <code>scores</code> since we passed along <code>output_scores=True</code>, but we don’t have <code>hidden_states</code> and <code>attentions</code> because we didn’t pass <code>output_hidden_states=True</code> or <code>output_attentions=True</code>.</p> <p>You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get <code>None</code>. 
Here, for instance, <code>generation_output.scores</code> are all the generated prediction scores of the language modeling head, and <code>generation_output.attentions</code> is <code>None</code>.</p> <p>When using our <code>generation_output</code> object as a tuple, it only keeps the attributes that don’t have <code>None</code> values. Here, for instance, it has two elements, <code>sequences</code> then <code>scores</code>, so</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->generation_output[:<span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div> <p>will return the tuple <code>(generation_output.sequences, generation_output.scores)</code>.</p> <p>When using our <code>generation_output</code> object as a dictionary, it only keeps the attributes that don’t have <code>None</code> values. 
Here, for instance, it has two keys that are <code>sequences</code> and <code>scores</code>.</p> <p>We document here all output types.</p> <h3 class="relative group"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GreedySearchOutput </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GreedySearchDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GreedySearchDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of 
<code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using greedy search.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GreedySearchEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GreedySearchEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L100" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span 
class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using greedy search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGreedySearchOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxGreedySearchOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGreedySearchOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGreedySearchOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L51" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: ndarray = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> 
<ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Flax Base class for outputs of decoder-only generation models using greedy search.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGreedySearchOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGreedySearchOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGreedySearchOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h3 class="relative group"><a id="transformers.generation_utils.SampleDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SampleOutput </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.SampleDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">SampleDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.SampleDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.SampleDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L142" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape 
<code>(num_return_sequences*batch_size, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using sampling.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.SampleEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">SampleEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.SampleEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.SampleEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L171" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape 
<code>(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_return_sequences, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using 
sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxSampleOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxSampleOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxSampleOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxSampleOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_utils.py#L65" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: ndarray = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul 
class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxSampleOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxSampleOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Flax Base class for outputs of decoder-only generation models using sampling.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxSampleOutput.replace"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxSampleOutput.replace" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxSampleOutput.replace"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/flax/struct.py#L108" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h3 class="relative group"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSearchOutput </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSearchDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" 
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSearchDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L214" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, input_ids.shape[-1])</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> 
</span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using beam search.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSearchEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSearchEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L249" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, max_length-1)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <h3 class="relative group"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSampleOutput </span></h3> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSampleDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSampleDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L300" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: 
typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, input_ids.shape[-1])</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> 
</li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using beam sample.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSampleEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSampleEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L335" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_beams, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
Tuple of <code>torch.FloatTensor</code> with up to <code>max_new_tokens</code> elements (one element for each generated token), with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>torch.LongTensor</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, max_length-1)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base 
!pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using beam sampling. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <h2 class="relative group"><a id="transformers.LogitsProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LogitsProcessor </span></h2> <p>A <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> can be used to modify the prediction scores of a language model head for generation.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L51" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L54" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.LogitsProcessor.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Torch method for processing logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L73" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> or <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> to subsequently process a <code>scores</code> input tensor. 
This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> or <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> to the inputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L80" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.LogitsProcessorList.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L65" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.LogitsWarper.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Torch method for warping logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L96" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MinLengthLogitsProcessor.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.MinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L117" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TemperatureLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L124" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>) &#x2014; The value used to modulate the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> for temperature (exponential scaling of the output probability distribution).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg 
width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TemperatureLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RepetitionPenaltyLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">RepetitionPenaltyLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RepetitionPenaltyLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RepetitionPenaltyLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L144" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">penalty<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> enforcing an exponential penalty on repeated sequences.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RepetitionPenaltyLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.RepetitionPenaltyLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RepetitionPenaltyLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L160" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopPLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L170" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to 
<code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> that performs top-p, i.e. restricting to the smallest set of most probable tokens whose probabilities add up to top_p or higher.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L193" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L209" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> that performs top-k, i.e. 
restricting to the k highest probability elements.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L230" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TypicalLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TypicalLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TypicalLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TypicalLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L238" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mass<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TypicalLogitsWarper.mass" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TypicalLogitsWarper.mass"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mass</strong> (<code>float</code>) &#x2014; Value of typical_p between 0 and 1 inclusive, defaults to 0.9.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TypicalLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TypicalLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TypicalLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TypicalLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> that performs typical decoding. See <a href="https://arxiv.org/abs/2202.00666" rel="nofollow">Typical Decoding for Natural Language Generation</a> for more information.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TypicalLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TypicalLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TypicalLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoRepeatNGramLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">NoRepeatNGramLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.NoRepeatNGramLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.NoRepeatNGramLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L322" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> 
<p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram_size<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.NoRepeatNGramLogitsProcessor.ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.NoRepeatNGramLogitsProcessor.ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces no repetition of n-grams. 
See <a href="https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345" rel="nofollow">Fairseq</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoRepeatNGramLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.NoRepeatNGramLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.NoRepeatNGramLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L337" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> 
**class transformers.NoBadWordsLogitsProcessor**(bad_words_ids: typing.List[typing.List[int]], eos_token_id: int) ([source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L389))

Parameters:

- **bad_words_ids** (`List[List[int]]`): List of list of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids`.
- **eos_token_id** (`int`): The id of the *end-of-sequence* token.

[LogitsProcessor](/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor) that enforces that specified sequences will never be sampled.

**__call__**(input_ids: LongTensor, scores: FloatTensor) ([source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L431))
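A minimal sketch of the usual entry point, `generate(bad_words_ids=...)`; the `gpt2` checkpoint and the banned words are illustrative choices. Loading the tokenizer with `add_prefix_space=True` makes the banned words tokenize the way they appear mid-sentence.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2", add_prefix_space=True)
model = AutoModelForCausalLM.from_pretrained("gpt2")

# token id sequences that must never be generated
bad_words_ids = tokenizer(["awful", "terrible"], add_special_tokens=False).input_ids

inputs = tokenizer("The weather today is", return_tensors="pt")
outputs = model.generate(**inputs, bad_words_ids=bad_words_ids, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```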
class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PrefixConstrainedLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PrefixConstrainedLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrefixConstrainedLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L517" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prefix_allowed_tokens_fn<span class="opacity-60">: typing.Callable[[int, torch.Tensor], typing.List[int]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces constrained generation and is useful for prefix-conditioned constrained generation. 
See <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a> for more information.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PrefixConstrainedLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PrefixConstrainedLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrefixConstrainedLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L534" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> 
<div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HammingDiversityLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">HammingDiversityLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.HammingDiversityLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HammingDiversityLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L543" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">diversity_penalty<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p 
class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.diversity_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.diversity_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>diversity_penalty</strong> (<code>float</code>) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams used for group beam search. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces diverse beam search. Note that this logits processor is only effective for <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">PreTrainedModel.group_beam_search()</a>. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models</a> for more details.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HammingDiversityLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.HammingDiversityLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HammingDiversityLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L574" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: 
**class transformers.ForcedBOSTokenLogitsProcessor**(bos_token_id: int) ([source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L603))

Parameters:

- **bos_token_id** (`int`): The id of the token to force as the first generated token.

[LogitsProcessor](/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor) that enforces the specified token as the first generated token.

**__call__**(input_ids: LongTensor, scores: FloatTensor) ([source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L615))
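The sketch below applies the processor directly to fake logits to show the mechanics; the token ids and vocabulary size are made up. In `generate()` the same behavior comes from `forced_bos_token_id`, which multilingual translation models such as mBART use to pin the target language token.

```python
import torch
from transformers import ForcedBOSTokenLogitsProcessor

processor = ForcedBOSTokenLogitsProcessor(bos_token_id=0)

input_ids = torch.tensor([[2]])  # only the decoder start token has been generated so far
scores = torch.randn(1, 100)     # fake next-token logits over a 100-token vocabulary

out = processor(input_ids, scores)
print(out.argmax(-1))  # tensor([0]): every token except bos_token_id was set to -inf
```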
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L624" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ForcedEOSTokenLogitsProcessor.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ForcedEOSTokenLogitsProcessor.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ForcedEOSTokenLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ForcedEOSTokenLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces the specified token as the last generated token when <code>max_length</code> is reached.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ForcedEOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ForcedEOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ForcedEOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L639" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InfNanRemoveLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">InfNanRemoveLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.InfNanRemoveLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InfNanRemoveLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
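Again a direct call on fake logits, with made-up ids and vocabulary size; in `generate()` the equivalent switch is `forced_eos_token_id` together with `max_length`.

```python
import torch
from transformers import ForcedEOSTokenLogitsProcessor

processor = ForcedEOSTokenLogitsProcessor(max_length=5, eos_token_id=99)

input_ids = torch.ones(1, 4, dtype=torch.long)  # 4 tokens generated; the next one is the last allowed
scores = torch.randn(1, 100)                    # fake next-token logits over a 100-token vocabulary

out = processor(input_ids, scores)
print(out.argmax(-1))  # tensor([99]): only eos_token_id keeps a finite score
```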
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L648" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that removes all <code>nan</code> and <code>inf</code> values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. <code>max_length</code> is reached.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InfNanRemoveLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.InfNanRemoveLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InfNanRemoveLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_logits_process.py#L655" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L56" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessor.__call__.cur_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessor.__call__.cur_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. 
kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TFLogitsProcessor.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>TF method for processing logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L75" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> to subsequently process a <code>scores</code> input tensor. This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> to the inputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessorList.__call__.cur_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessorList.__call__.cur_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. 
kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TFLogitsProcessorList.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L64" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during 
generation with multinomial sampling.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: 
int</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsWarper.__call__.cur_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsWarper.__call__.cur_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cur_len</strong> (<code>int</code>) &#x2014; The current length of valid input sequence tokens. In the TF implementation, the input_ids&#x2019; sequence length is the maximum length generate can produce, and we need to know which of its tokens are valid. 
kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.TFLogitsWarper.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>TF method for warping logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFTemperatureLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L98" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>) &#x2014; The value used to modulate the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> for temperature (exponential scaling output probability distribution).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTemperatureLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L113" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopPLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.TFTopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L147" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> that performs top-p, i.e. 
restricting to top tokens summing to &lt;= prob_cut_off.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFTopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L118" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> 
</p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> that performs top-k, i.e. restricting to the k highest probability elements.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline 
text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L201" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMinLengthLogitsProcessor.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L227" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoBadWordsLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFNoBadWordsLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFNoBadWordsLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoBadWordsLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L287" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60">: 
typing.List[typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoBadWordsLogitsProcessor.bad_words_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoBadWordsLogitsProcessor.bad_words_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer(bad_word, add_prefix_space=True).input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoBadWordsLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoBadWordsLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces that specified sequences will never be sampled.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoBadWordsLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFNoBadWordsLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFNoBadWordsLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L364" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoRepeatNGramLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFNoRepeatNGramLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFNoRepeatNGramLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoRepeatNGramLogitsProcessor"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L385" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram_size<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoRepeatNGramLogitsProcessor.ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoRepeatNGramLogitsProcessor.ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces no repetition of n-grams. 
See <a href="https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345" rel="nofollow">Fairseq</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoRepeatNGramLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFNoRepeatNGramLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoRepeatNGramLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L424" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRepetitionPenaltyLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRepetitionPenaltyLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRepetitionPenaltyLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRepetitionPenaltyLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L237" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">penalty<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> 
<ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> enforcing an exponential penalty on repeated sequences.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRepetitionPenaltyLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRepetitionPenaltyLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFRepetitionPenaltyLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L279" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFForcedBOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFForcedBOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFForcedBOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFForcedBOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFForcedBOSTokenLogitsProcessor.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFForcedBOSTokenLogitsProcessor.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces the specified token as the first generated token.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFForcedBOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 
class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFForcedBOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFForcedBOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L460" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFForcedEOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFForcedEOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFForcedEOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFForcedEOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L476" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFForcedEOSTokenLogitsProcessor.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFForcedEOSTokenLogitsProcessor.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TFForcedEOSTokenLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFForcedEOSTokenLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces the specified token as the last generated token when <code>max_length</code> is reached.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFForcedEOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 
18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFForcedEOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFForcedEOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_logits_process.py#L493" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" 
height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary token when not using beam search or log softmax for each vocabulary token when using beam search. <strong>kwargs</strong> &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.FlaxLogitsProcessor.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Flax method for processing logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> or <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> to subsequently process a <code>scores</code> input tensor. This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> or <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> to the inputs.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L79" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary token when not using beam search or log softmax for each vocabulary token when using beam search. <strong>kwargs</strong> &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.FlaxLogitsProcessorList.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L64" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span> 
<span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using beam search or log softmax for each vocabulary token when using beam search. <strong>kwargs</strong> &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.FlaxLogitsWarper.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Flax method for warping logits.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path 
class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTemperatureLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>) &#x2014; 
The value used to modulate the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> for temperature (exponential scaling output probability distribution).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTemperatureLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopPLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span 
class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the smallest set of most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> that performs top-p filtering, i.e. restricting generation to the smallest set of most probable tokens whose probabilities add up to <code>top_p</code> or higher.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L137" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L157" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> that performs top-k, i.e. 
restricting to the k highest probability elements.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L178" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedBOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxForcedBOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedBOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedBOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L193" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> that enforces the specified token as the first generated token.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedBOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedBOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedBOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L205" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedEOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxForcedEOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedEOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedEOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L215" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxForcedEOSTokenLogitsProcessor.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> that enforces the specified token as the last generated token when <code>max_length</code> is reached.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedEOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedEOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L230" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L240" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMinLengthLogitsProcessor.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_flax_logits_process.py#L261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.StoppingCriteria" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>StoppingCriteria </span></h2> <p>A <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> can be used to change when to stop generation (other than EOS token).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">StoppingCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L33" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all stopping criteria that can be applied during generation.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L36" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteria.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteriaList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">StoppingCriteriaList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteriaList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteriaList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteriaList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteriaList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteriaList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L111" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.StoppingCriteriaList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteriaList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteriaList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteriaList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxLengthCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaxLengthCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaxLengthCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxLengthCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L41" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length that the output sequence can have in number of tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to stop generation whenever the full generated number of tokens exceeds <code>max_length</code>. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxLengthCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaxLengthCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxLengthCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L54" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxTimeCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaxTimeCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaxTimeCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxTimeCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_time<span class="opacity-60">: float</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initial_timestamp<span class="opacity-60">: typing.Optional[float] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul 
class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.max_time" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.max_time"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_time</strong> (<code>float</code>) &#x2014; The maximum allowed time in seconds for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.initial_time" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.initial_time"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initial_time</strong> (<code>float</code>, <em>optional</em>, defaults to <code>time.time()</code>) &#x2014; The start of the generation allowed time.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. 
You can override this by passing an <code>initial_time</code>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxTimeCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaxTimeCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxTimeCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_stopping_criteria.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <h2 class="relative group"><a id="transformers.Constraint" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Constraint"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Constraints </span></h2> <p>A <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> can be used to force the generation to include specific tokens or sequences in the output.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Constraint</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Constraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L5" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.</p> <p>All classes that inherit Constraint must follow the requirement that</p> <div class="relative group rounded-md"><a id="transformers.Constraint.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Constraint.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->completed = <span class="hljs-literal">False</span> <span class="hljs-keyword">while</span> <span class="hljs-keyword">not</span> completed: _, completed = constraint.update(constraint.advance())<!-- HTML_TAG_END --></pre></div></div> <p>will always terminate (halt).</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 
items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>token_ids(<code>torch.tensor</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.Constraint.advance.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>token_ids(<code>torch.tensor</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Must be a tensor of a list of indexable tokens, not some integer.</p> <!-- HTML_TAG_END 
--></p> </div></div> <p>When called, returns the token that would take this constraint one step closer to being fulfilled.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.copy"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>copy</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.copy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.copy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L113" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stateful<span class="opacity-60"> = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>constraint(<code>Constraint</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div 
id="transformers.Constraint.copy.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>constraint(<code>Constraint</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same constraint as the one being called from.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Creates a new instance of this constraint.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.does_advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>does_advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.does_advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.does_advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L60" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reads in a token and returns whether it creates progress.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.remaining"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>remaining</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.remaining" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.remaining"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L104" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the number of remaining steps of <code>advance()</code> in order to complete this constraint.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> 
<div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L94" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the state of this constraint to its initialization. 
We would call this in cases where the fulfillment of a constraint is interrupted by an unwanted token.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.test"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>test</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.test" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.test"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Tests whether this constraint has been properly defined.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.update"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.update" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.update"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>stepped(<code>bool</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.Constraint.update.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>stepped(<code>bool</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Whether this constraint has become one step closer to being fulfilled. 
completed(<code>bool</code>): Whether this constraint has been completely fulfilled by this token being generated. reset (<code>bool</code>): Whether this constraint has reset its progress by this token being generated.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object, unlike <code>does_advance(self, token_id: int)</code>.</p> <p>This isn’t to test whether a certain token will advance the progress; it’s to update its state as if it has been generated. This becomes important if token_id != desired token (refer to the else statement in PhrasalConstraint).</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PhrasalConstraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PhrasalConstraint</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PhrasalConstraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PhrasalConstraint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L129" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.List[int]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PhrasalConstraint.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PhrasalConstraint.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>List[int]</code>) &#x2014; The ids of the tokens that must be generated by the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> enforcing that an ordered sequence of tokens is included in the output.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DisjunctiveConstraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DisjunctiveConstraint</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DisjunctiveConstraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DisjunctiveConstraint"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">nested_token_ids<span class="opacity-60">: typing.List[typing.List[int]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.DisjunctiveConstraint.nested_token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DisjunctiveConstraint.nested_token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nested_token_ids</strong> (<code>List[List[int]]</code>) &#x2014; a list of words, where each word is a list of ids. 
This constraint is fulfilled by generating just one from the list of words.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A special <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> that is fulfilled by fulfilling just one of several constraints.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConstraintListState</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L350" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constraints<span class="opacity-60">: typing.List[transformers.generation_beam_constraints.Constraint]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstraintListState.constraints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstraintListState.constraints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of <a href="/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint">Constraint</a> objects that must be fulfilled by the beam scorer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class for beam scorers to track its progress through a list of constraints.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState.advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 
5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState.advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState.advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L382" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The list of tokens to generate such that we can make progress. 
By “list” we don’t mean the list of tokens that will fully fulfill a constraint.</p> <p>Given constraints <code>c_i = {t_ij | j == # of tokens}</code>, if we’re not in the middle of progressing through a specific constraint <code>c_i</code>, we return:</p> <p><code>[t_k1 for k in indices of unfulfilled constraints]</code></p> <p>If we are in the middle of a constraint, then we return: <code>[t_ij]</code>, where <code>i</code> is the index of the in-progress constraint, <code>j</code> is the next step for the constraint.</p> <p>Though we don’t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that’s the only one we’ll return.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_constraints.py#L417" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Optional[typing.List[int]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>token_ids: the tokens generated thus far to reset the state of the progress through constraints.</p></div></div> <h2 class="relative group"><a id="transformers.BeamScorer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSearch </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BeamScorer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BeamScorer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamScorer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all beam scorers that are used for <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> and <a href="/docs/transformers/pr_19429/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a>.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer.process"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>process</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamScorer.process" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamScorer.process"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L94" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>UserDict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a 
href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.next_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.next_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.BeamScorer.process.next_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.BeamScorer.process.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>UserDict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary composed of the fields as defined above:</p> <ul> <li><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Updated scores of all non-finished beams.</li> <li><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Next tokens to be added to the non-finished beam_hypotheses.</li> <li><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Beam indices indicating to which beam the next tokens shall be added.</li> </ul> <!-- HTML_TAG_END --></p> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamScorer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamScorer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L106)

`( input_ids: LongTensor, next_scores: FloatTensor, next_tokens: LongTensor, next_indices: LongTensor, max_length: int, **kwargs )` → `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`

Parameters:

- **input_ids** (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [PreTrainedTokenizer](/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer). See [PreTrainedTokenizer.encode()](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode) and [PreTrainedTokenizer.__call__()](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__) for details. [What are input IDs?](../glossary#input-ids)
- **final_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`): The final scores of all non-finished beams.
- **final_beam_tokens** (`torch.LongTensor` of shape `(batch_size * num_beams)`): The last tokens to be added to the non-finished beam hypotheses.
- **final_beam_indices** (`torch.LongTensor` of shape `(batch_size * num_beams)`): The beam indices indicating to which beam the `final_beam_tokens` shall be added.
- **pad_token_id** (`int`, *optional*): The id of the *padding* token.
- **eos_token_id** (`int`, *optional*): The id of the *end-of-sequence* token.

Returns: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (`sequence_length`) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`.
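Concrete scorers implement `process()`, which is called at every generation step to pick the surviving beam candidates, and `finalize()`, which is called once at the end to assemble the returned sequences. The skeleton below is only an illustrative sketch of that interface; the class name `MyBeamScorer` is made up, and a real implementation would need the bookkeeping that the stock `BeamSearchScorer` provides.

```python
from transformers import BeamScorer


class MyBeamScorer(BeamScorer):
    """Toy skeleton showing the two hooks every BeamScorer must provide."""

    def process(self, input_ids, next_scores, next_tokens, next_indices, **kwargs):
        # called at every step: decide which of the 2 * num_beams candidate
        # continuations survive, and return their scores/tokens/beam indices
        raise NotImplementedError

    def finalize(self, input_ids, next_scores, next_tokens, next_indices, max_length, **kwargs):
        # called once at the end: assemble the best finished hypotheses into a
        # (batch_size * num_return_sequences, sequence_length) tensor
        raise NotImplementedError
```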
### class transformers.BeamSearchScorer

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L120)

`( batch_size: int, num_beams: int, device: device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[bool] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, **kwargs )`

Parameters:

- **batch_size** (`int`): Batch size of `input_ids` for which standard beam search decoding is run in parallel.
- **max_length** (`int`): The maximum length of the sequence to be generated.
- **num_beams** (`int`): Number of beams for beam search.
- **device** (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated.
- **length_penalty** (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences.
- **do_early_stopping** (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.
- **num_beam_hyps_to_keep** (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [BeamSearchScorer.finalize()](#transformers.BeamSearchScorer.finalize).
- **num_beam_groups** (`int`): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.

[BeamScorer](/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer) implementing standard beam search decoding.

Adapted in part from [Facebook's XLM beam search code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).

Reference for the diverse beam search algorithm and implementation: [Ashwin Kalyan's DBS implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua).
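In practice a `BeamSearchScorer` is created and driven for you by `generate()`, but it can also be handed to the lower-level `beam_search()` loop directly. The following is a minimal sketch of that pattern; the `t5-base` checkpoint, the translation prompt, and the specific logits processor are illustrative choices, not requirements of the API.

```python
import torch
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    BeamSearchScorer,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
)

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

encoder_input_ids = tokenizer(
    "translate English to German: How old are you?", return_tensors="pt"
).input_ids

num_beams = 3
# every beam starts from the decoder start token
input_ids = torch.ones((num_beams, 1), dtype=torch.long) * model.config.decoder_start_token_id

# the encoder is run once; its outputs are shared by all beams
model_kwargs = {
    "encoder_outputs": model.get_encoder()(
        encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
    )
}

beam_scorer = BeamSearchScorer(batch_size=1, num_beams=num_beams, device=model.device)
logits_processor = LogitsProcessorList(
    [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)]
)

outputs = model.beam_search(
    input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```

The scorer only ranks and stores hypotheses; the model forward passes and logits processing stay outside of it, which is what makes it reusable across decoding strategies.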
#### process

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L208)

`( input_ids: LongTensor, next_scores: FloatTensor, next_tokens: LongTensor, next_indices: LongTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, beam_indices: Optional[torch.LongTensor] = None )`

#### finalize

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L302)

`( input_ids: LongTensor, final_beam_scores: FloatTensor, final_beam_tokens: LongTensor, final_beam_indices: LongTensor, max_length: int, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, beam_indices: Optional[torch.LongTensor] = None )`
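To make the division of labour between `process()` and `finalize()` concrete, here is a heavily simplified version of the loop that `beam_search()` runs internally. It is a sketch only: it reuses `model`, `input_ids`, `model_kwargs`, and `beam_scorer` from the previous example, hard-codes a maximum length of 20, and omits details such as `past_key_values` caching and attention-mask handling.

```python
import torch
import torch.nn.functional as F

batch_size, num_beams = 1, beam_scorer.num_beams
beam_scores = torch.zeros((batch_size, num_beams))
beam_scores[:, 1:] = -1e9            # only the first beam is "alive" at step 0
beam_scores = beam_scores.view(-1)   # shape (batch_size * num_beams,)

for _ in range(20):
    logits = model(decoder_input_ids=input_ids, **model_kwargs).logits[:, -1, :]
    next_token_scores = F.log_softmax(logits, dim=-1) + beam_scores[:, None]

    vocab_size = next_token_scores.shape[-1]
    next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
    # keep 2 * num_beams candidates so finished hypotheses can be replaced
    next_token_scores, next_tokens = torch.topk(next_token_scores, 2 * num_beams, dim=1)
    next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
    next_tokens = next_tokens % vocab_size

    # process() decides which candidates survive this step
    beam_outputs = beam_scorer.process(
        input_ids,
        next_token_scores,
        next_tokens,
        next_indices,
        pad_token_id=model.config.pad_token_id,
        eos_token_id=model.config.eos_token_id,
    )
    beam_scores = beam_outputs["next_beam_scores"]
    input_ids = torch.cat(
        [input_ids[beam_outputs["next_beam_indices"], :],
         beam_outputs["next_beam_tokens"].unsqueeze(-1)],
        dim=-1,
    )
    if beam_scorer.is_done:
        break

# finalize() assembles the best finished hypotheses into the returned sequences
sequences = beam_scorer.finalize(
    input_ids,
    beam_scores,
    next_tokens,
    next_indices,
    max_length=20,
    pad_token_id=model.config.pad_token_id,
    eos_token_id=model.config.eos_token_id,
)["sequences"]
```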
### class transformers.ConstrainedBeamSearchScorer

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L390)

`( batch_size: int, num_beams: int, constraints: List[Constraint], device: device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[bool] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, **kwargs )`

Parameters:

- **batch_size** (`int`): Batch size of `input_ids` for which standard beam search decoding is run in parallel.
- **max_length** (`int`): The maximum length of the sequence to be generated.
- **num_beams** (`int`): Number of beams for beam search.
- **constraints** (`List[Constraint]`): A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation output. For more information, see the documentation of [Constraint](/docs/transformers/pr_19429/en/internal/generation_utils#transformers.Constraint).
- **device** (`torch.device`): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `ConstrainedBeamSearchScorer` will be allocated.
- **length_penalty** (`float`, *optional*, defaults to 1.0): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences.
- **do_early_stopping** (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.
- **num_beam_hyps_to_keep** (`int`, *optional*, defaults to 1): The number of beam hypotheses that shall be returned upon calling [ConstrainedBeamSearchScorer.finalize()](#transformers.ConstrainedBeamSearchScorer.finalize).
- **num_beam_groups** (`int`): Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.

[BeamScorer](/docs/transformers/pr_19429/en/internal/generation_utils#transformers.BeamScorer) implementing constrained beam search decoding.
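Like `BeamSearchScorer`, this scorer is normally constructed internally by `generate()` when constraints are supplied. Below is a hedged end-to-end sketch of that route; the checkpoint, the prompt, and the forced phrase are arbitrary choices used only for illustration.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")

input_ids = tokenizer(
    "translate English to German: How old are you?", return_tensors="pt"
).input_ids

# force the token(s) for "Sie" to appear in every returned hypothesis
constraints = [PhrasalConstraint(tokenizer("Sie", add_special_tokens=False).input_ids)]

outputs = model.generate(
    input_ids,
    constraints=constraints,  # generate() builds a ConstrainedBeamSearchScorer internally
    num_beams=10,
    num_return_sequences=1,
    no_repeat_ngram_size=1,
    remove_invalid_values=True,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```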
#### process

[source](https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L486)

`( input_ids: LongTensor, next_scores: FloatTensor, next_tokens: LongTensor, next_indices: LongTensor, scores_for_all_vocab: FloatTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None )` → `UserDict`

Parameters:

- **input_ids** (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from [PreTrainedTokenizer](/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer). See [PreTrainedTokenizer.encode()](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode) and [PreTrainedTokenizer.__call__()](/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__) for details. [What are input IDs?](../glossary#input-ids)
- **next_scores** (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): Current scores of the top `2 * num_beams` non-finished beam hypotheses.
- **next_tokens** (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
- **next_indices** (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
- **scores_for_all_vocab** (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`): The scores of all tokens in the vocabulary for each of the beam hypotheses.
- **pad_token_id** (`int`, *optional*): The id of the *padding* token.
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.ConstrainedBeamSearchScorer.process.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>UserDict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary composed of the fields as defined above:</p> <ul> <li> <p><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Updated scores of all non-finished beams.</p> </li> <li> <p><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Next tokens to be added to the non-finished beam_hypotheses.</p> </li> <li> <p><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Beam 
indices indicating to which beam the next tokens shall be added.</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstrainedBeamSearchScorer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstrainedBeamSearchScorer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstrainedBeamSearchScorer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_beam_search.py#L768" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.top_k_top_p_filtering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.top_k_top_p_filtering"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.top_k_top_p_filtering</span></h4><!-- HTML_TAG_END --> <a id="transformers.top_k_top_p_filtering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.top_k_top_p_filtering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_utils.py#L3416" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.top_k_top_p_filtering.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.top_k_top_p_filtering.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. 
(<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.top_k_top_p_filtering.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Filter a distribution of logits using top-k and/or nucleus (top-p) filtering</p> <p>From: <a href="https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317" rel="nofollow">https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317</a></p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.tf_top_k_top_p_filtering"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.tf_top_k_top_p_filtering</span></h4><!-- HTML_TAG_END --> <a id="transformers.tf_top_k_top_p_filtering" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.tf_top_k_top_p_filtering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/generation_tf_utils.py#L3207" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60"> = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60"> = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Filter a distribution of logits using top-k and/or nucleus (top-p) filtering</p> <p>From: <a href="https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317" rel="nofollow">https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317</a></p></div> <script type="module" data-hydrate="1dgubt9"> 
import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1dgubt9"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/internal/generation_utils.mdx-hf-doc-builder.js") ], params: {} } }); </script>
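The two filtering utilities documented above (transformers.top_k_top_p_filtering and its TensorFlow counterpart tf_top_k_top_p_filtering) are typically applied to next-token logits before sampling. A minimal PyTorch sketch, assuming only a hypothetical (batch_size, vocab_size) logits tensor and the default filter_value / min_tokens_to_keep:

import torch
from transformers import top_k_top_p_filtering

# Hypothetical next-token scores for a batch of 1 over a 50k-token vocabulary.
logits = torch.randn(1, 50000)

# Keep the 50 highest-scoring tokens, then restrict to the smallest set whose
# cumulative probability reaches 0.95; all other positions are set to -inf.
filtered_logits = top_k_top_p_filtering(logits, top_k=50, top_p=0.95)

# Sample the next token id from the filtered distribution.
probs = torch.softmax(filtered_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)

The tf_top_k_top_p_filtering variant follows the same semantics for TensorFlow tensors; the tensor shape and sampling step here are illustrative assumptions, not part of the documented API.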
74
0
hf_public_repos/doc-build-dev/transformers/pr_19429/en
hf_public_repos/doc-build-dev/transformers/pr_19429/en/internal/tokenization_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-tokenizers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedTokenizerBase&quot;,&quot;title&quot;:&quot;PreTrainedTokenizerBase&quot;},{&quot;local&quot;:&quot;transformers.SpecialTokensMixin&quot;,&quot;title&quot;:&quot;SpecialTokensMixin&quot;},{&quot;local&quot;:&quot;transformers.tokenization_utils_base.TruncationStrategy&quot;,&quot;title&quot;:&quot;Enums and namedtuples&quot;}],&quot;title&quot;:&quot;Utilities for Tokenizers&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/pages/internal/tokenization_utils.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/Docstring-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_19429/en/_app/chunks/ExampleCodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="utilities-for-tokenizers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-tokenizers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for Tokenizers </span></h1> <p>This page lists all the utility functions used by the tokenizers, mainly the class <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> that implements the common methods between <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> and the mixin <a 
href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.SpecialTokensMixin">SpecialTokensMixin</a>.</p> <p>Most of those are only useful if you are studying the code of the tokenizers in the library.</p> <h2 class="relative group"><a id="transformers.PreTrainedTokenizerBase" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizerBase </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizerBase</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1453" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). 
If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. 
Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). 
Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>.</p> <p>Handles shared (mostly boiler plate) methods for those two classes.</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, Any]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when 
loading the tokenizer with the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> <li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2410" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair_target<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair_target" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair_target"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair_target</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>, <em>optional</em>) &#x2014; The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.padding" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.__call__.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.as_target_tokenizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span 
class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>as_target_tokenizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.as_target_tokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.as_target_tokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3536" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3370" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.batch_decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_encode_plus"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_encode_plus</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_encode_plus" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2707" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_text_or_text_pairs<span class="opacity-60">: typing.Union[typing.List[str], typing.List[typing.Tuple[str, str]], typing.List[typing.List[str]], typing.List[typing.Tuple[typing.List[str], typing.List[str]]], typing.List[typing.List[int]], typing.List[typing.Tuple[typing.List[int], typing.List[int]]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_text_or_text_pairs</strong> (<code>List[str]</code>, <code>List[Tuple[str, str]]</code>, <code>List[List[str]]</code>, <code>List[Tuple[List[str], List[str]]]</code>, and for not-fast tokenizers, also <code>List[List[int]]</code>, <code>List[Tuple[List[int], List[int]]]</code>) &#x2014; Batch of sequences or pair of sequences to be encoded. 
This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in <code>encode_plus</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.batch_encode_plus.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 
text-orange-700 dark:text-gray-400"><p>This method is deprecated, <code>__call__</code> should be used instead.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3003" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p 
class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The model input with special tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.</p> <p>This implementation does not add special tokens and this method should be overridden in a subclass.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.clean_up_tokenization"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>clean_up_tokenization</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.clean_up_tokenization" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.clean_up_tokenization"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3479" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">out_string<span class="opacity-60">: str</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>out_string</strong> (<code>str</code>) &#x2014; The text to clean up.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.clean_up_tokenization.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The cleaned-up string.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" 
fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.convert_tokens_to_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3357" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.List[str]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>List[str]</code>) &#x2014; The token to join in a string.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The joined tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a sequence of tokens in a single string. The most simple way to do it is <code>&quot; &quot;.join(tokens)</code> but we often want to remove sub-word tokenization artifacts at the same time.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2983" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token type ids.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Create the token type IDs corresponding to the sequences passed. <a href="../glossary#token-type-ids">What are token type IDs?</a></p> <p>Should be overridden in a subclass if the model has a special way of building those.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3403" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.decode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.PreTrainedTokenizerBase.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2220" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or 
<code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul> <p>**kwargs &#x2014; Passed along to the <code>.tokenize()</code> method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.encode.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode_plus"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode_plus</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode_plus" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.encode_plus"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2611" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code> (the latter only for not-fast tokenizers)) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.encode_plus.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Tokenize and prepare for the model a sequence or a pair of sequences.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This method is deprecated, 
<code>__call__</code> should be used instead.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1570" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*init_inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a predefined tokenizer hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing vocabulary files required by the tokenizer, for instance saved using the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>(<strong>Deprecated</strong>, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., <code>./my_model_directory/vocab.txt</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Attempt to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. 
The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only rely on local files and not to attempt to download any files.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.revision"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. 
for facebook/rag-token-base), specify it here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the Tokenizer <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the Tokenizer <code>__init__</code> method. Can be used to set special tokens like <code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>. 
See parameters in the <code>__init__</code> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> (or a derived class) from a predefined tokenizer.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <div class="relative group rounded-md"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PreTrainedTokenizerBase* so let&#x27;s show our examples on a derived class: BertTokenizer</span> <span class="hljs-comment"># Download vocabulary from huggingface.co and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Download vocabulary from huggingface.co (user-uploaded) and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span 
class="hljs-string">&quot;dbmdz/bert-base-german-cased&quot;</span>) <span class="hljs-comment"># If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-comment"># If the tokenizer uses a single vocabulary file, you can point directly to this file</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_vocab.txt&quot;</span>) <span class="hljs-comment"># You can link tokens to special vocabulary when instantiating</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, unk_token=<span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>) <span class="hljs-comment"># You should be sure &#x27;&lt;unk&gt;&#x27; is in the vocabulary when doing that.</span> <span class="hljs-comment"># Otherwise use tokenizer.add_special_tokens({&#x27;unk_token&#x27;: &#x27;&lt;unk&gt;&#x27;}) instead)</span> <span class="hljs-keyword">assert</span> tokenizer.unk_token == <span class="hljs-string">&quot;&lt;unk&gt;&quot;</span><!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3448" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list of integers in the range [0, 1]</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids of the first sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of ids of the second sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> or <code>encode_plus</code> methods.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.get_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.get_vocab" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.get_vocab"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L1558" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.PreTrainedTokenizerBase.get_vocab.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p 
class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The vocabulary.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Returns the vocabulary as a dictionary of token to index.</p> <p><code>tokenizer.get_vocab()[token]</code> is equivalent to <code>tokenizer.convert_tokens_to_ids(token)</code> when <code>token</code> is in the vocab.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.pad"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>pad</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.pad" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.pad"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2810" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoded_inputs<span class="opacity-60">: typing.Union[transformers.tokenization_utils_base.BatchEncoding, typing.List[transformers.tokenization_utils_base.BatchEncoding], typing.Dict[str, typing.List[int]], typing.Dict[str, typing.List[typing.List[int]]], typing.List[typing.Dict[str, typing.List[int]]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.encoded_inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.encoded_inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoded_inputs</strong> (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, list of <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <code>Dict[str, List[int]]</code>, 
<code>Dict[str, List[List[int]]]</code> or <code>List[Dict[str, List[int]]]</code>) &#x2014; Tokenized inputs. Can represent one input (<a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> or <code>Dict[str, List[int]]</code>) or a batch of tokenized inputs (list of <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <em>Dict[str, List[List[int]]]</em> or <em>List[Dict[str, List[int]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[int]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).</p><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.</p> <p>Padding side (left/right) padding token ids are defined at the tokenizer level (with <code>self.padding_side</code>, <code>self.pad_token_id</code> and <code>self.pad_token_type_id</code>).</p> <p>Please note that with a fast tokenizer, using the <code>__call__</code> method is faster than using a method to encode the text followed by a call to the <code>pad</code> method to get a padded encoding.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If the <code>encoded_inputs</code> passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with <code>return_tensors</code>. 
In the case of PyTorch tensors, you will lose the specific device of your tensors however.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.prepare_for_model"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_for_model</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.prepare_for_model" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.prepare_for_model"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3023" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.utils.generic.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.utils.generic.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. 
Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.verbose"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.prepare_for_model.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
Please Note, for <em>pair_ids</em> different than <code>None</code> and <em>truncation_strategy = longest_first</em> or <code>True</code>, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_seq2seq_batch</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3579" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_texts<span class="opacity-60">: typing.List[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_texts<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_target_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: str = &#39;longest&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_texts</strong> (<code>List[str]</code>) &#x2014; List of documents to summarize or source language texts.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_texts</strong> (<code>list</code>, <em>optional</em>) &#x2014; List of summaries or target language texts.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_target_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to <code>None</code>, this will use the max_length value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of a list of Python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
**kwargs &#x2014; Additional keyword arguments passed along to <code>self.__call__</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li><strong>input_ids</strong> — List of token ids to be fed to the encoder.</li> <li><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model.</li> <li><strong>labels</strong> — List of token ids for tgt_texts.</li> </ul> <p>The full set of keys <code>[input_ids, attention_mask, labels]</code>, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Prepare model inputs for translation. For best performance, translate one sentence at a time.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/utils/hub.py#L712" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_id<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_shard_size<span class="opacity-60">: typing.Union[int, str, NoneType] = &#39;10GB&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">create_pr<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**deprecated_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.repo_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.repo_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_id</strong> (<code>str</code>) &#x2014; The name of the repository you want to push your tokenizer to. It should contain your organization name when pushing to a given organization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. 
Will default to <code>True</code> if there is no directory named like <code>repo_id</code>, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;Upload tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>huggingface-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.max_shard_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.max_shard_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, <em>optional</em>, defaults to <code>&quot;10GB&quot;</code>) &#x2014; Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like <code>&quot;5MB&quot;</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.create_pr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.create_pr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to create a PR with the uploaded files or directly commit.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <div class="relative group rounded-md"><a id="transformers.PreTrainedTokenizerBase.push_to_hub.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.push_to_hub.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect 
fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot;.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;huggingface/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 
0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3553" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoTokenizer&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoTokenizer&quot;</code>) &#x2014; The auto class to register this new tokenizer with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom tokenizers as the ones in the library are already mapped with <code>AutoTokenizer</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2020" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">legacy_format<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A tuple of <code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The path to a directory where the tokenizer will be saved.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>legacy_format</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON format as well as in legacy format if it exists, i.e. with tokenizer-specific vocabulary and a separate added_tokens file.</p> <p>If <code>False</code>, will only save the tokenizer in the unified JSON format. This format is incompatible with &#x201C;slow&#x201D; tokenizers (not powered by the <em>tokenizers</em> library), so the tokenizer will not be able to be loaded in the corresponding &#x201C;slow&#x201D; tokenizer.</p> <p>If <code>True</code>, will save the tokenizer in legacy format. If the &#x201C;slow&#x201D; tokenizer doesn&#x2019;t exist, a ValueError is raised. filename_prefix &#x2014; (<code>str</code>, <em>optional</em>): A prefix to add to the names of the files saved by the tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your namespace). 
kwargs &#x2014; Additional keyword arguments passed along to the <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.save_pretrained.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A tuple of <code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The files saved.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Save the full tokenizer state.</p> <p>This method makes sure the full tokenizer can then be re-loaded using the <code>~tokenization_utils_base.PreTrainedTokenizer.from_pretrained</code> class method.</p> <p>Warning: This won’t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying <code>tokenizer.do_lower_case</code> after creation).</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2182" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple(str)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code>) &#x2014; The directory in which to save the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filename_prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; An optional prefix to add to the named of the saved files.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.save_vocabulary.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple(str)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Paths to the files saved.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Save only the vocabulary of the tokenizer (vocabulary + added tokens).</p> <p>This method won’t save the configuration and special token mappings of the tokenizer. Use <code>_save_pretrained()</code> to save the whole state of the tokenizer.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.tokenize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokenize</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.tokenize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.tokenize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L2200" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.pair" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>str</code>, <em>optional</em>) &#x2014; A second sequence to be encoded with the first.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the special tokens associated with the corresponding model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific encode method. See details in <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a><!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.tokenize.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Converts a string in a sequence of tokens, replacing unknown tokens with the <code>unk_token</code>.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.truncate_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>truncate_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.truncate_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.truncate_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L3159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_tokens_to_remove<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation_strategy<span class="opacity-60">: typing.Union[str, transformers.tokenization_utils_base.TruncationStrategy] = &#39;longest_first&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[List[int], List[int], List[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized 
input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_tokens_to_remove</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of tokens to remove using the truncation strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; The strategy to follow for truncation. Can be:</p> <ul> <li><code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.PreTrainedTokenizerBase.truncate_sequences.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[List[int], List[int], List[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The truncated <code>ids</code>, the truncated <code>pair_ids</code> and the list of overflowing tokens. 
Note: The <em>longest_first</em> strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Truncates a sequence pair in-place following the strategy.</p></div></div> <h2 class="relative group"><a id="transformers.SpecialTokensMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SpecialTokensMixin </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SpecialTokensMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L763" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.SpecialTokensMixin.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 
1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A mixin derived by <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_19429/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> to handle specific behaviors related to special tokens. 
In particular, this class holds the attributes that can be used to directly access these special tokens in a model-independent manner, and allows setting and updating the special tokens.</p> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.add_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.add_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.add_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L843" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_dict<span class="opacity-60">: typing.Dict[str, typing.Union[str, tokenizers.AddedToken]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 
cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens_dict</strong> (dictionary <em>str</em> to <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Keys should be in the list of predefined special attributes: [<code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>].</p> <p>Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the <code>unk_token</code> to them).<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.SpecialTokensMixin.add_special_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of tokens added to the vocabulary.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. 
If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).</p> <p>Note: When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.</p> <p>In order to do that, please use the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings">resize_token_embeddings()</a> method.</p> <p>Using <code>add_special_tokens</code> will ensure your special tokens can be used in several ways:</p> <ul><li>Special tokens are carefully handled by the tokenizer (they are never split).</li> <li>You can easily refer to special tokens using tokenizer class attributes like <code>tokenizer.cls_token</code>. This makes it easy to develop model-agnostic training and fine-tuning scripts.</li></ul> <p>When possible, special tokens are already registered for provided pretrained models (for instance <a href="/docs/transformers/pr_19429/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> <code>cls_token</code> is already registered to be <code>&#39;[CLS]&#39;</code> and XLM’s one is also registered to be <code>&#39;&lt;/s&gt;&#39;</code>).</p> <div class="relative group rounded-md"><a id="transformers.SpecialTokensMixin.add_special_tokens.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_special_tokens.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Let&#x27;s see how to add a new classification token to GPT-2</span> tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2Model.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) special_tokens_dict = {<span class="hljs-string">&quot;cls_token&quot;</span>: <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-keyword">assert</span> tokenizer.cls_token == <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span><!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.add_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.add_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.add_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L915" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_tokens<span class="opacity-60">: typing.Union[str, tokenizers.AddedToken, typing.List[typing.Union[str, tokenizers.AddedToken]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_tokens.new_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_tokens.new_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_tokens</strong> (<code>str</code>, <code>tokenizers.AddedToken</code> or a list of <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Tokens are only added if they are not already in the vocabulary. 
<code>tokenizers.AddedToken</code> wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_tokens.special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_tokens.special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Can be used to specify if the token is a special token. This mostly changes the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance).</p> <p>See details for <code>tokenizers.AddedToken</code> in the HuggingFace tokenizers library.<!-- HTML_TAG_END --> </span></span> </li></ul> <div id="transformers.SpecialTokensMixin.add_tokens.returns" class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of tokens added to the vocabulary.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from the length of the current vocabulary and will be isolated before the tokenization algorithm is applied.
Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way.</p> <p>Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.</p> <p>In order to do that, please use the <a href="/docs/transformers/pr_19429/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings">resize_token_embeddings()</a> method.</p> <div class="relative group rounded-md"><a id="transformers.SpecialTokensMixin.add_tokens.example" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_tokens.example"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Let&#x27;s see how to increase the vocabulary of Bert model and tokenizer</span> tokenizer = BertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) num_added_toks = tokenizer.add_tokens([<span class="hljs-string">&quot;new_tok1&quot;</span>, <span class="hljs-string">&quot;my_new-tok2&quot;</span>]) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of 
the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer))<!-- HTML_TAG_END --></pre></div></div></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.sanitize_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sanitize_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.sanitize_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.sanitize_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L831" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div id="transformers.SpecialTokensMixin.sanitize_special_tokens.returns" class="flex items-center 
font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of tokens added in the vocabulary during the operation.</p> <!-- HTML_TAG_END --></p> </div></div> <p>Make sure that all the special tokens attributes of the tokenizer (<code>tokenizer.mask_token</code>, <code>tokenizer.cls_token</code>, etc.) are in the vocabulary.</p> <p>Add the missing ones to the vocabulary if needed.</p></div></div> <h2 class="relative group"><a id="transformers.tokenization_utils_base.TruncationStrategy" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tokenization_utils_base.TruncationStrategy"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Enums and namedtuples </span></h2> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.tokenization_utils_base.TruncationStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.tokenization_utils_base.</span><span class="font-semibold">TruncationStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.tokenization_utils_base.TruncationStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.tokenization_utils_base.TruncationStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L121" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>truncation</code> argument in <a href="/docs/transformers/pr_19429/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. 
Useful for tab-completion in an IDE.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.CharSpan"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">CharSpan</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.CharSpan" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.CharSpan"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L133" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.CharSpan.start" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CharSpan.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>int</code>) &#x2014; Index of the first character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.CharSpan.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CharSpan.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>int</code>) &#x2014; Index of the character following the last character in the original string.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Character span in the original string.</p></div> <div class="docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"> <div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenSpan"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenSpan</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenSpan" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenSpan"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/vr_19429/src/transformers/tokenization_utils_base.py#L146" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenSpan.start" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenSpan.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>int</code>) &#x2014; Index of the first token in the span.<!-- HTML_TAG_END 
--> </span></span> </li><li class="text-base !pl-4 my-3 rounded "><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenSpan.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenSpan.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>int</code>) &#x2014; Index of the token following the last token in the span.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Token span in an encoded string (list of tokens).</p></div> <script type="module" data-hydrate="1xgz5om"> import { start } from "/docs/transformers/pr_19429/en/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1xgz5om"]').parentNode, paths: {"base":"/docs/transformers/pr_19429/en","assets":"/docs/transformers/pr_19429/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_19429/en/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_19429/en/_app/pages/internal/tokenization_utils.mdx-hf-doc-builder.js") ], params: {} } }); </script>
75
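The tokenization-utilities page above ships rendered examples for add_special_tokens() and add_tokens(), but none for sanitize_special_tokens() or for the CharSpan / TokenSpan namedtuples it documents. The sketch below is not part of the rendered page; it is a minimal, hedged illustration that assumes a fast (Rust-backed) tokenizer, and the "bert-base-uncased" checkpoint is only an example choice.

# Hypothetical illustration (not taken from the page above): exercising
# sanitize_special_tokens() and the CharSpan / TokenSpan namedtuples.
# Assumes a *fast* tokenizer; "bert-base-uncased" is just an example checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# sanitize_special_tokens() adds any special-token attribute (cls_token,
# mask_token, ...) missing from the vocabulary and returns how many tokens
# it had to add; for a stock pretrained tokenizer this should be 0.
print(tokenizer.sanitize_special_tokens())

encoding = tokenizer("Hello world")

# token_to_chars() returns a CharSpan: character offsets of one token in the text.
char_span = encoding.token_to_chars(1)   # token 0 is [CLS], token 1 is "hello"
print(char_span.start, char_span.end)    # expected: 0 5

# word_to_tokens() returns a TokenSpan: the token range covering a given word.
token_span = encoding.word_to_tokens(0)
print(token_span.start, token_span.end)  # expected: 1 2

Both helpers require the fast tokenizer backend (the default since v4, as the migration page below notes); with a slow Python tokenizer they raise an error instead.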
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/migration.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;migrating-from-previous-packages&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;migrating-from-transformers-v3x-to-v4x&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-to-obtain-the-same-behavior-as-v3x-in-v4x&quot;,&quot;title&quot;:&quot;How to obtain the same behavior as v3.x in v4.x&quot;}],&quot;title&quot;:&quot;1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default.&quot;},{&quot;local&quot;:&quot;2-sentencepiece-is-removed-from-the-required-dependencies&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-to-obtain-the-same-behavior-as-v3x-in-v4x&quot;,&quot;title&quot;:&quot;How to obtain the same behavior as v3.x in v4.x&quot;}],&quot;title&quot;:&quot;2. SentencePiece is removed from the required dependencies&quot;},{&quot;local&quot;:&quot;3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-to-obtain-the-same-behavior-as-v3x-in-v4x&quot;,&quot;title&quot;:&quot;How to obtain the same behavior as v3.x in v4.x&quot;}],&quot;title&quot;:&quot;3. The architecture of the repo has been updated so that each model resides in its folder&quot;},{&quot;local&quot;:&quot;4-switching-the-returndict-argument-to-true-by-default&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-to-obtain-the-same-behavior-as-v3x-in-v4x&quot;,&quot;title&quot;:&quot;How to obtain the same behavior as v3.x in v4.x&quot;}],&quot;title&quot;:&quot;4. Switching the `return_dict` argument to `True` by default&quot;},{&quot;local&quot;:&quot;5-removed-some-deprecated-attributes&quot;,&quot;title&quot;:&quot;5. Removed some deprecated attributes&quot;}],&quot;title&quot;:&quot;Migrating from transformers `v3.x` to `v4.x`&quot;},{&quot;local&quot;:&quot;migrating-from-pytorchtransformers-to-transformers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;positional-order-of-some-models-keywords-inputs-attentionmask-tokentypeids-changed&quot;,&quot;title&quot;:&quot;Positional order of some models&#39; keywords inputs (`attention_mask`, `token_type_ids`...) 
changed&quot;}],&quot;title&quot;:&quot;Migrating from pytorch-transformers to 🤗 Transformers&quot;},{&quot;local&quot;:&quot;migrating-from-pytorchpretrainedbert&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;models-always-output-tuples&quot;,&quot;title&quot;:&quot;Models always output `tuples`&quot;},{&quot;local&quot;:&quot;serialization&quot;,&quot;title&quot;:&quot;Serialization&quot;},{&quot;local&quot;:&quot;optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules&quot;,&quot;title&quot;:&quot;Optimizers: BertAdam &amp; OpenAIAdam are now AdamW, schedules are standard PyTorch schedules&quot;}],&quot;title&quot;:&quot;Migrating from pytorch-pretrained-bert&quot;}],&quot;title&quot;:&quot;Migrating from previous packages&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/migration.mdx-040dfdaa.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="migrating-from-previous-packages" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#migrating-from-previous-packages"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Migrating from previous packages </span></h1> <h2 class="relative group"><a id="migrating-from-transformers-v3x-to-v4x" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#migrating-from-transformers-v3x-to-v4x"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Migrating from transformers <code>v3.x</code> to <code>v4.x</code></span></h2> <p>A couple of changes were introduced when the switch from version 3 to version 4 was done. Below is a summary of the expected changes:</p> <h4 class="relative group"><a id="1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#1-autotokenizers-and-pipelines-now-use-fast-rust-tokenizers-by-default"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default. </span></h4> <p>The python and rust tokenizers have roughly the same API, but the rust tokenizers have a more complete feature set.</p> <p>This introduces two breaking changes:</p> <ul><li>The handling of overflowing tokens between the python and rust tokenizers is different.</li> <li>The rust tokenizers do not accept integers in the encoding methods.</li></ul> <h5 class="relative group"><a id="how-to-obtain-the-same-behavior-as-v3x-in-v4x" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-to-obtain-the-same-behavior-as-v3x-in-v4x"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How to obtain the same behavior as v3.x in v4.x </span></h5> <ul><li>The pipelines now contain additional features out of the box. See the <a href="main_classes/pipelines#transformers.TokenClassificationPipeline">token-classification pipeline with the <code>grouped_entities</code> flag</a>.</li> <li>The auto-tokenizers now return rust tokenizers. 
In order to obtain the python tokenizers instead, the user may use the <code>use_fast</code> flag by setting it to <code>False</code>:</li></ul> <p>In version <code>v3.x</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>to obtain the same in version <code>v4.x</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, use_fast=<span class="hljs-literal">False</span>)<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="2-sentencepiece-is-removed-from-the-required-dependencies" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#2-sentencepiece-is-removed-from-the-required-dependencies"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>2. SentencePiece is removed from the required dependencies </span></h4> <p>The requirement on the SentencePiece dependency has been lifted from the <code>setup.py</code>. This is done so that we may have a channel on anaconda cloud without relying on <code>conda-forge</code>. This means that the tokenizers that depend on the SentencePiece library will not be available with a standard <code>transformers</code> installation.</p> <p>This includes the <strong>slow</strong> versions of:</p> <ul><li><code>XLNetTokenizer</code></li> <li><code>AlbertTokenizer</code></li> <li><code>CamembertTokenizer</code></li> <li><code>MBartTokenizer</code></li> <li><code>PegasusTokenizer</code></li> <li><code>T5Tokenizer</code></li> <li><code>ReformerTokenizer</code></li> <li><code>XLMRobertaTokenizer</code></li></ul> <h5 class="relative group"><a id="how-to-obtain-the-same-behavior-as-v3x-in-v4x" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-to-obtain-the-same-behavior-as-v3x-in-v4x"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How to obtain the same behavior as v3.x in v4.x </span></h5> <p>In order to obtain the same behavior as version <code>v3.x</code>, you should install <code>sentencepiece</code> additionally:</p> <p>In version <code>v3.x</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers<!-- HTML_TAG_END --></pre></div> <p>to obtain the same in version <code>v4.x</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[sentencepiece]<!-- HTML_TAG_END --></pre></div> <p>or</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers sentencepiece<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#3-the-architecture-of-the-repo-has-been-updated-so-that-each-model-resides-in-its-folder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>3. The architecture of the repo has been updated so that each model resides in its folder </span></h4> <p>The past and foreseeable addition of new models means that the number of files in the directory <code>src/transformers</code> keeps growing and becomes harder to navigate and understand. We made the choice to put each model and the files accompanying it in their own sub-directories.</p> <p>This is a breaking change as importing intermediary layers using a model’s module directly needs to be done via a different path.</p> <h5 class="relative group"><a id="how-to-obtain-the-same-behavior-as-v3x-in-v4x" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-to-obtain-the-same-behavior-as-v3x-in-v4x"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How to obtain the same behavior as v3.x in v4.x </span></h5> <p>In order to obtain the same behavior as version <code>v3.x</code>, you should update the path used to access the layers.</p> <p>In version <code>v3.x</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 
<pre>from transformers.modeling_bert import BertLayer</pre> <p>to obtain the same in version <code>v4.x</code>:</p> <pre>from transformers.models.bert.modeling_bert import BertLayer</pre> <h4 id="4-switching-the-returndict-argument-to-true-by-default">4. Switching the <code>return_dict</code> argument to <code>True</code> by default</h4> <p>The <a href="main_classes/output"><code>return_dict</code> argument</a> enables the return of dict-like Python objects containing the model outputs, instead of the standard tuples. This object is self-documenting: keys can be used to retrieve values, and it also behaves like a tuple, so items may be retrieved by index or by slice.</p> <p>This is a breaking change because, unlike the previous tuples, the returned object cannot be unpacked directly: <code>value0, value1 = outputs</code> will not work.</p> <h5 id="how-to-obtain-the-same-behavior-as-v3x-in-v4x">How to obtain the same behavior as v3.x in v4.x</h5> <p>In order to obtain the same behavior as version <code>v3.x</code>, you should set the <code>return_dict</code> argument to <code>False</code>, either in the model configuration or during the forward pass.</p> <p>In version <code>v3.x</code>:</p> <pre>model = BertModel.from_pretrained("bert-base-cased")
outputs = model(**inputs)</pre> <p>to obtain the same in version <code>v4.x</code>:</p> <pre>model = BertModel.from_pretrained("bert-base-cased")
outputs = model(**inputs, return_dict=False)</pre> <p>or</p> <pre>model = BertModel.from_pretrained("bert-base-cased", return_dict=False)
outputs = model(**inputs)</pre>
<h4 id="5-removed-some-deprecated-attributes">5. Removed some deprecated attributes</h4> <p>Attributes that had been deprecated for at least a month have been removed. The full list of deprecated attributes can be found in <a href="https://github.com/huggingface/transformers/pull/8604" rel="nofollow">#8604</a>.</p> <p>Here is a list of these attributes/methods/arguments and what their replacements should be:</p> <p>In several models, the labels become consistent with the other models:</p> <ul><li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>AlbertForMaskedLM</code> and <code>AlbertForPreTraining</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>BertForMaskedLM</code> and <code>BertForPreTraining</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>DistilBertForMaskedLM</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>ElectraForMaskedLM</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>LongformerForMaskedLM</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>MobileBertForMaskedLM</code>.</li> <li><code>masked_lm_labels</code> becomes <code>labels</code> in <code>RobertaForMaskedLM</code>.</li> <li><code>lm_labels</code> becomes <code>labels</code> in <code>BartForConditionalGeneration</code>.</li> <li><code>lm_labels</code> becomes <code>labels</code> in <code>GPT2DoubleHeadsModel</code>.</li> <li><code>lm_labels</code> becomes <code>labels</code> in <code>OpenAIGPTDoubleHeadsModel</code>.</li> <li><code>lm_labels</code> becomes <code>labels</code> in <code>T5ForConditionalGeneration</code>.</li></ul> <p>In several models, the caching mechanism becomes consistent with the other models:</p> <ul><li><code>decoder_cached_states</code> becomes <code>past_key_values</code> in all BART-like, FSMT and T5 models.</li> <li><code>decoder_past_key_values</code> becomes <code>past_key_values</code> in all BART-like, FSMT and T5 models.</li> <li><code>past</code> becomes <code>past_key_values</code> in all CTRL models.</li> <li><code>past</code> becomes <code>past_key_values</code> in all GPT-2 models.</li></ul> <p>Regarding the tokenizer classes:</p> <ul><li>The tokenizer attribute <code>max_len</code> becomes <code>model_max_length</code>.</li> <li>The tokenizer attribute <code>return_lengths</code> becomes <code>return_length</code>.</li> <li>The tokenizer encoding argument <code>is_pretokenized</code> becomes <code>is_split_into_words</code>.</li></ul> <p>Regarding the <code>Trainer</code> class:</p> <ul><li>The <code>Trainer</code> argument <code>tb_writer</code> is removed in favor of the callback <code>TensorBoardCallback(tb_writer=...)</code>.</li> <li>The <code>Trainer</code> argument <code>prediction_loss_only</code> is removed in favor of the class argument <code>args.prediction_loss_only</code>.</li> <li>The <code>Trainer</code> attribute <code>data_collator</code> should be a callable.</li> <li>The <code>Trainer</code> method <code>_log</code> is deprecated in favor of <code>log</code>.</li> <li>The <code>Trainer</code> method <code>_training_step</code> is deprecated in favor of <code>training_step</code>.</li> <li>The <code>Trainer</code> method <code>_prediction_loop</code> is deprecated in favor of <code>prediction_loop</code>.</li> <li>The <code>Trainer</code> method <code>is_local_master</code> is deprecated in favor of <code>is_local_process_zero</code>.</li> <li>The <code>Trainer</code> method <code>is_world_master</code> is deprecated in favor of <code>is_world_process_zero</code>.</li></ul> <p>Regarding the <code>TFTrainer</code> class:</p> <ul><li>The <code>TFTrainer</code> argument <code>prediction_loss_only</code> is removed in favor of the class argument <code>args.prediction_loss_only</code>.</li> <li>The <code>TFTrainer</code> method <code>_log</code> is deprecated in favor of <code>log</code>.</li> <li>The <code>TFTrainer</code> method <code>_prediction_loop</code> is deprecated in favor of <code>prediction_loop</code>.</li> <li>The <code>TFTrainer</code> method <code>_setup_wandb</code> is deprecated in favor of <code>setup_wandb</code>.</li> <li>The <code>TFTrainer</code> method <code>_run_model</code> is deprecated in favor of <code>run_model</code>.</li></ul> <p>Regarding the <code>TrainingArguments</code> class:</p> <ul><li>The <code>TrainingArguments</code> argument <code>evaluate_during_training</code> is deprecated in favor of <code>evaluation_strategy</code>.</li></ul> <p>Regarding the Transfo-XL model:</p> <ul><li>The Transfo-XL configuration attribute <code>tie_weight</code> becomes <code>tie_words_embeddings</code>.</li> <li>The Transfo-XL modeling method <code>reset_length</code> becomes <code>reset_memory_length</code>.</li></ul> <p>Regarding pipelines:</p> <ul><li>The <code>FillMaskPipeline</code> argument <code>topk</code> becomes <code>top_k</code>.</li></ul>
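<p>As a quick illustration of a few of these renames in <code>v4.x</code> (the checkpoint names below are only examples, adapt them to your own model):</p> <pre>from transformers import BertForMaskedLM, BertTokenizer, pipeline

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")

# `masked_lm_labels` is now `labels`
inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])

# `max_len` is now `model_max_length`; `is_pretokenized` is now `is_split_into_words`
print(tokenizer.model_max_length)
encoded = tokenizer(["Paris", "is", "nice"], is_split_into_words=True)

# `topk` is now `top_k` in the fill-mask pipeline
fill_mask = pipeline("fill-mask", model="bert-base-uncased")
predictions = fill_mask("Paris is the [MASK] of France.", top_k=3)</pre>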
<h2 id="migrating-from-pytorchtransformers-to-transformers">Migrating from pytorch-transformers to 🤗 Transformers</h2> <p>Here is a quick summary of what you should take care of when migrating from <code>pytorch-transformers</code> to 🤗 Transformers.</p> <h3 id="positional-order-of-some-models-keywords-inputs-attentionmask-tokentypeids-changed">Positional order of some models&#39; keyword inputs (<code>attention_mask</code>, <code>token_type_ids</code>...) changed</h3> <p>To be able to use Torchscript (see #1010, #1204 and #1195), the specific order of some models’ <strong>keyword inputs</strong> (<code>attention_mask</code>, <code>token_type_ids</code>…) has been changed.</p> <p>If you used to call the models with keyword names for keyword arguments, e.g. <code>model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)</code>, this should not cause any change.</p> <p>If you used to call the models with positional inputs for keyword arguments, e.g. <code>model(input_ids, attention_mask, token_type_ids)</code>, you may have to double check the exact order of input arguments.</p>
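<p>Concretely, here is a minimal sketch of the safe and the fragile calling patterns (the checkpoint name is only an example):</p> <pre>from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
model = BertModel.from_pretrained("bert-base-cased")
enc = tokenizer("Hello world", return_tensors="pt")

# Safe: everything after input_ids is passed by keyword, so argument order does not matter
outputs = model(enc["input_ids"], attention_mask=enc["attention_mask"], token_type_ids=enc["token_type_ids"])

# Fragile: a purely positional call relies on the (changed) signature order,
# so double-check it against the model documentation before using it
outputs = model(enc["input_ids"], enc["attention_mask"], enc["token_type_ids"])</pre>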
<h2 id="migrating-from-pytorchpretrainedbert">Migrating from pytorch-pretrained-bert</h2> <p>Here is a quick summary of what you should take care of when migrating from <code>pytorch-pretrained-bert</code> to 🤗 Transformers.</p> <h3 id="models-always-output-tuples">Models always output <code>tuples</code></h3> <p>The main breaking change when migrating from <code>pytorch-pretrained-bert</code> to 🤗 Transformers is that the models’ forward method always outputs a <code>tuple</code> with various elements depending on the model and the configuration parameters.</p>
<p>The exact content of the tuples for each model is detailed in the models’ docstrings and the <a href="https://huggingface.co/transformers/" rel="nofollow">documentation</a>.</p> <p>In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in <code>pytorch-pretrained-bert</code>.</p> <p>Here is a <code>pytorch-pretrained-bert</code> to 🤗 Transformers conversion example for a <code>BertForSequenceClassification</code> classification model:</p> <pre># Let's load our model
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")

# If you used to have this line in pytorch-pretrained-bert:
loss = model(input_ids, labels=labels)

# Now just use this line in 🤗 Transformers to extract the loss from the output tuple:
outputs = model(input_ids, labels=labels)
loss = outputs[0]

# In 🤗 Transformers you can also have access to the logits:
loss, logits = outputs[:2]

# And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation)
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_attentions=True)
outputs = model(input_ids, labels=labels)
loss, logits, attentions = outputs</pre>
<h3 id="serialization">Serialization</h3> <p>Breaking changes in the <code>from_pretrained()</code> method:</p> <ol><li><p>Models are now set in evaluation mode by default when instantiated with the <code>from_pretrained()</code> method. To train them, don’t forget to set them back in training mode (<code>model.train()</code>) to activate the dropout modules.</p></li> <li><p>The additional <code>*inputs</code> and <code>**kwargs</code> arguments supplied to the <code>from_pretrained()</code> method used to be passed directly to the underlying model’s class <code>__init__()</code> method. They are now used to update the model configuration attribute first, which can break derived model classes built based on the previous <code>BertForSequenceClassification</code> examples. More precisely, the positional arguments <code>*inputs</code> provided to <code>from_pretrained()</code> are directly forwarded to the model <code>__init__()</code> method, while the keyword arguments <code>**kwargs</code> (i) which match configuration class attributes are used to update said attributes, and (ii) which don’t match any configuration class attributes are forwarded to the model <code>__init__()</code> method.</p></li></ol>
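<p>For instance, this short sketch shows keyword arguments that match configuration attributes being used to update the configuration rather than being passed to <code>__init__()</code> (the attribute names are standard configuration fields, shown only as an illustration):</p> <pre>from transformers import BertForSequenceClassification

# `num_labels` and `output_attentions` match configuration attributes,
# so from_pretrained() updates the configuration with them
model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=3, output_attentions=True
)
print(model.config.num_labels)         # 3
print(model.config.output_attentions)  # True</pre>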
class="hljs-string">&quot;[SPECIAL_TOKEN_2]&quot;</span>]) model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-comment"># Train our model</span> train(model) <span class="hljs-comment">### Now let&#x27;s save our model and tokenizer to a directory</span> model.save_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) tokenizer.save_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) <span class="hljs-comment">### Reload the model and the tokenizer</span> model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>) tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./my_saved_model_directory/&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimizers-bertadam-openaiadam-are-now-adamw-schedules-are-standard-pytorch-schedules"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimizers: BertAdam &amp; OpenAIAdam are now AdamW, schedules are standard PyTorch schedules </span></h3> <p>The two optimizers previously included, <code>BertAdam</code> and <code>OpenAIAdam</code>, have been replaced by a single <code>AdamW</code> optimizer which has a few differences:</p> <ul><li>it only implements weights decay correction,</li> <li>schedules are now externals (see below),</li> <li>gradient clipping is now also external (see below).</li></ul> <p>The new optimizer <code>AdamW</code> matches PyTorch <code>Adam</code> optimizer API and let you use standard PyTorch or apex methods for the schedule and clipping.</p> <p>The schedules are now standard <a href="https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate" rel="nofollow">PyTorch learning rate schedulers</a> and not part of the optimizer anymore.</p> <p>Here is a conversion examples from <code>BertAdam</code> with a linear warmup and decay schedule to <code>AdamW</code> and the same schedule:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Parameters:</span> lr = <span class="hljs-number">1e-3</span> max_grad_norm = <span class="hljs-number">1.0</span> num_training_steps = <span class="hljs-number">1000</span> num_warmup_steps = <span class="hljs-number">100</span> warmup_proportion = <span class="hljs-built_in">float</span>(num_warmup_steps) / <span class="hljs-built_in">float</span>(num_training_steps) <span class="hljs-comment"># 0.1</span> <span class="hljs-comment">### Previously BertAdam optimizer was instantiated like this:</span> optimizer = BertAdam( model.parameters(), lr=lr, schedule=<span class="hljs-string">&quot;warmup_linear&quot;</span>, warmup=warmup_proportion, num_training_steps=num_training_steps, ) <span class="hljs-comment">### and used like this:</span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_data: loss = model(batch) loss.backward() optimizer.step() <span class="hljs-comment">### In 🤗 Transformers, optimizer and schedules are split and instantiated like this:</span> optimizer = AdamW( model.parameters(), lr=lr, correct_bias=<span class="hljs-literal">False</span> ) <span class="hljs-comment"># To reproduce BertAdam specific behavior set correct_bias=False</span> scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps ) <span class="hljs-comment"># PyTorch scheduler</span> <span class="hljs-comment">### and used like this:</span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_data: loss = model(batch) loss.backward() torch.nn.utils.clip_grad_norm_( model.parameters(), max_grad_norm ) <span class="hljs-comment"># Gradient clipping is not in AdamW anymore (so you can use amp without issue)</span> optimizer.step() scheduler.step()<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="1kh1foc"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1kh1foc"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/migration.mdx-040dfdaa.js") ], params: {} } }); </script>
76
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/pr_checks.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;checks-on-a-pull-request&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;tests&quot;,&quot;title&quot;:&quot;Tests&quot;},{&quot;local&quot;:&quot;documentation-build&quot;,&quot;title&quot;:&quot;Documentation build&quot;},{&quot;local&quot;:&quot;code-and-documentation-style&quot;,&quot;title&quot;:&quot;Code and documentation style&quot;},{&quot;local&quot;:&quot;repository-consistency&quot;,&quot;title&quot;:&quot;Repository consistency&quot;}],&quot;title&quot;:&quot;Checks on a Pull Request&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/pr_checks.mdx-69a387c3.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="checks-on-a-pull-request" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#checks-on-a-pull-request"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Checks on a Pull Request </span></h1> <p>When you open a pull request on 🤗 Transformers, a fair number of checks will be run to make sure the patch you are adding is not breaking anything existing. 
<ul><li>regular tests</li> <li>documentation build</li> <li>code and documentation style</li> <li>general repository consistency</li></ul> <p>In this document, we will take a stab at explaining what those various checks are and the reason behind them, as well as how to debug them locally if one of them fails on your PR.</p> <p>Note that they all require you to have a dev install:</p> <pre>pip install transformers[dev]</pre> <p>or for an editable install:</p> <pre>pip install -e .[dev]</pre> <p>inside the Transformers repo.</p>
<h2 id="tests">Tests</h2> <p>All the jobs that begin with <code>ci/circleci: run_tests_</code> run parts of the Transformers testing suite. Each of those jobs focuses on a part of the library in a certain environment: for instance <code>ci/circleci: run_tests_pipelines_tf</code> runs the pipelines tests in an environment where only TensorFlow is installed.</p> <p>Note that to avoid running tests when there is no real change in the modules they are testing, only part of the test suite is run each time: a utility is run to determine the differences in the library between before and after the PR (what GitHub shows you in the “Files changes” tab) and picks the tests impacted by that diff. That utility can be run locally with:</p> <pre>python utils/tests_fetcher.py</pre> <p>from the root of the Transformers repo. It will:</p> <ol><li>Check for each file in the diff if the changes are in the code or only in comments or docstrings. Only the files with real code changes are kept.</li> <li>Build an internal map that gives for each file of the source code of the library all the files it recursively impacts. Module A is said to impact module B if module B imports module A. For the recursive impact, we need a chain of modules going from module A to module B in which each module imports the previous one.</li> <li>Apply this map on the files gathered in step 1, which gives us the list of model files impacted by the PR.</li> <li>Map each of those files to their corresponding test file(s) and get the list of tests to run.</li></ol> <p>When executing the script locally, you should get the results of steps 1, 3 and 4 printed and thus know which tests are run.</p>
<p>The script will also create a file named <code>test_list.txt</code> which contains the list of tests to run, and you can run them locally with the following command:</p> <pre>python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt)</pre>
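<p>If you only want to exercise one of the impacted test files while iterating, you can also point <code>pytest</code> at it directly; the file name below is purely an example:</p> <pre>python -m pytest -rA -s tests/test_modeling_bert.py</pre>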
<p>Just in case anything slipped through the cracks, the full test suite is also run daily.</p> <h2 id="documentation-build">Documentation build</h2> <p>The job <code>ci/circleci: build_doc</code> runs a build of the documentation just to make sure everything will be okay once your PR is merged. If that step fails, you can inspect it locally by going into the <code>docs</code> folder of the Transformers repo and then typing</p> <pre>make html</pre> <p>Sphinx is not known for its helpful error messages, so you might have to try a few things to really find the source of the error.</p> <h2 id="code-and-documentation-style">Code and documentation style</h2> <p>Code formatting is applied to all the source files, the examples and the tests using <code>black</code> and <code>isort</code>. We also have a custom tool taking care of the formatting of docstrings and <code>rst</code> files (<code>utils/style_doc.py</code>), as well as of the order of the lazy imports performed in the Transformers <code>__init__.py</code> files (<code>utils/custom_init_isort.py</code>).</p>
<p>All of this can be launched by executing</p> <pre>make style</pre> <p>The CI checks that these have been applied inside the <code>ci/circleci: check_code_quality</code> check. It also runs <code>flake8</code>, which takes a basic look at your code and will complain if it finds an undefined variable, or one that is not used. To run that check locally, use</p> <pre>make quality</pre>
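<p>If you prefer to invoke the underlying tools yourself rather than going through <code>make</code>, a rough equivalent of the formatting and quality step is sketched below; the exact directories and options are defined in the repository’s <code>Makefile</code> and <code>setup.cfg</code>, so treat this only as an approximation:</p> <pre>black examples tests src utils
isort examples tests src utils
flake8 examples tests src utils</pre>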
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->make fixup<!-- HTML_TAG_END --></pre></div> <p>This last command will also run all the additional checks for the repository consistency. Let’s have a look at them.</p> <h2 class="relative group"><a id="repository-consistency" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#repository-consistency"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Repository consistency </span></h2> <p>This regroups all the tests to make sure your PR leaves the repository in a good state, and is performed by the <code>ci/circleci: check_repository_consistency</code> check. 
<p>You can run that check locally by executing the following:</p> <pre>make repo-consistency</pre> <p>This checks that:</p> <ul><li>All objects added to the init are documented (performed by <code>utils/check_repo.py</code>)</li> <li>All <code>__init__.py</code> files have the same content in their two sections (performed by <code>utils/check_inits.py</code>)</li> <li>All code identified as a copy from another module is consistent with the original (performed by <code>utils/check_copies.py</code>)</li> <li>The translations of the READMEs and the index of the doc have the same model list as the main README (performed by <code>utils/check_copies.py</code>)</li> <li>The auto-generated tables in the documentation are up to date (performed by <code>utils/check_table.py</code>)</li> <li>The library has all objects available even if not all optional dependencies are installed (performed by <code>utils/check_dummies.py</code>)</li></ul> <p>Should this check fail, the first two items require manual fixing; the last four can be fixed automatically for you by running the command</p> <pre>make fix-copies</pre>
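<p>The individual consistency utilities can also be run on their own while you iterate; for instance, something along these lines (check each script’s <code>--help</code> for the exact options):</p> <pre>python utils/check_copies.py
python utils/check_dummies.py</pre>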
<p>Additional checks concern PRs that add new models, mainly that:</p> <ul><li>All models added are in an Auto-mapping (performed by <code>utils/check_repo.py</code>)</li> <li>All models are properly tested (performed by <code>utils/check_repo.py</code>)</li></ul> <script type="module" data-hydrate="1jdq08m"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1jdq08m"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/pr_checks.mdx-69a387c3.js") ], params: {} } }); </script>
77
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/installation.html
<h1 id="installation">Installation</h1>
<p>Install 🤗 Transformers for whichever deep learning library you’re working with, set up your cache, and optionally configure 🤗 Transformers to run offline.</p>
<p>🤗 Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:</p>
<ul><li><a href="https://pytorch.org/get-started/locally/">PyTorch</a> installation instructions.</li> <li><a href="https://www.tensorflow.org/install/pip">TensorFlow 2.0</a> installation instructions.</li> <li><a href="https://flax.readthedocs.io/en/latest/">Flax</a> installation instructions.</li></ul>
<h2 id="install-with-pip">Install with pip</h2>
<p>You should install 🤗 Transformers in a <a href="https://docs.python.org/3/library/venv.html">virtual environment</a>. If you’re unfamiliar with Python virtual environments, take a look at this <a href="https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/">guide</a>. A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.</p>
<p>Start by creating a virtual environment in your project directory:</p>
<pre>python -m venv .env</pre>
<p>Activate the virtual environment:</p>
<pre>source .env/bin/activate</pre>
<p>Now you’re ready to install 🤗 Transformers with the following command:</p>
<pre>pip install transformers</pre>
<p>For CPU support only, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:</p>
<pre>pip install transformers[torch]</pre>
<p>🤗 Transformers and TensorFlow 2.0:</p>
<pre>pip install transformers[tf-cpu]</pre>
<p>🤗 Transformers and Flax:</p>
<pre>pip install transformers[flax]</pre>
<p>Finally, check if 🤗 Transformers has been properly installed by running the following command. It will download a pretrained model:</p>
<pre>python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"</pre>
<p>The label and score are then printed out:</p>
<pre>[{'label': 'POSITIVE', 'score': 0.9998704791069031}]</pre>
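<p>The same check can also be run from a short Python script instead of the shell one-liner; this is just an equivalent sketch of the command above:</p>
<pre>from transformers import pipeline

# downloads a default sentiment-analysis model on first use, then runs it on a sample sentence
classifier = pipeline("sentiment-analysis")
print(classifier("we love you"))</pre>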
<h2 id="install-from-source">Install from source</h2>
<p>Install 🤗 Transformers from source with the following command:</p>
<pre>pip install git+https://github.com/huggingface/transformers</pre>
<p>This command installs the bleeding edge <code>master</code> version rather than the latest <code>stable</code> version. The <code>master</code> version is useful for staying up-to-date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn’t been rolled out yet. However, this means the <code>master</code> version may not always be stable. We strive to keep the <code>master</code> version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an <a href="https://github.com/huggingface/transformers/issues">Issue</a> so we can fix it even sooner!</p>
<p>Check if 🤗 Transformers has been properly installed by running the following command:</p>
<pre>python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"</pre>
<h2 id="editable-install">Editable install</h2>
<p>You will need an editable install if you’d like to:</p>
<ul><li>Use the <code>master</code> version of the source code.</li> <li>Contribute to 🤗 Transformers and need to test changes in the code.</li></ul>
<p>Clone the repository and install 🤗 Transformers with the following commands:</p>
<pre>git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .</pre>
<p>These commands will link the folder you cloned the repository to with your Python library paths. Python will now look inside the folder you cloned to in addition to the normal library paths. For example, if your Python packages are typically installed in <code>~/anaconda3/envs/main/lib/python3.7/site-packages/</code>, Python will also search the folder you cloned to: <code>~/transformers/</code>.</p>
<div class="course-tip"><p>You must keep the <code>transformers</code> folder if you want to keep using the library.</p></div>
<p>Now you can easily update your clone to the latest version of 🤗 Transformers with the following command:</p>
<pre>cd ~/transformers/
git pull</pre>
<p>Your Python environment will find the <code>master</code> version of 🤗 Transformers on the next run.</p>
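<p>A quick way to confirm that the editable install is active is to check which files Python actually imports. This is a minimal sketch; the exact path depends on where you cloned the repository:</p>
<pre>import transformers

# with an editable install this should point inside your clone,
# e.g. ~/transformers/src/transformers/__init__.py
print(transformers.__file__)</pre>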
<h2 id="install-with-conda">Install with conda</h2>
<p>Install from the conda channel <code>huggingface</code>:</p>
<pre>conda install -c huggingface transformers</pre>
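<p>Whichever way you installed the library, you can check which version ended up in your environment; this is a small sketch rather than part of the official instructions:</p>
<pre>import transformers

# prints the installed version of the library
print(transformers.__version__)</pre>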
<h2 id="cache-setup">Cache setup</h2>
<p>Pretrained models are downloaded and locally cached at: <code>~/.cache/huggingface/transformers/</code>. This is the default directory given by the shell environment variable <code>TRANSFORMERS_CACHE</code>. On Windows, the default directory is given by <code>C:\Users\username\.cache\huggingface\transformers</code>. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:</p>
<ol><li>Shell environment variable (default): <code>TRANSFORMERS_CACHE</code>.</li> <li>Shell environment variable: <code>HF_HOME</code> + <code>transformers/</code>.</li> <li>Shell environment variable: <code>XDG_CACHE_HOME</code> + <code>/huggingface/transformers</code>.</li></ol>
<div class="course-tip"><p>🤗 Transformers will use the shell environment variables <code>PYTORCH_TRANSFORMERS_CACHE</code> or <code>PYTORCH_PRETRAINED_BERT_CACHE</code> if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable <code>TRANSFORMERS_CACHE</code>.</p></div>
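<p>Besides the environment variables, individual downloads can be redirected per call. The sketch below assumes the <code>cache_dir</code> argument of <code>from_pretrained()</code>; the model name and path are only examples:</p>
<pre>from transformers import AutoModel

# files for this model are stored under the given directory instead of the default cache
model = AutoModel.from_pretrained("bert-base-uncased", cache_dir="/path/to/my/cache")</pre>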
<h2 id="offline-mode">Offline mode</h2>
<p>🤗 Transformers is able to run in a firewalled or offline environment by only using local files. Set the environment variable <code>TRANSFORMERS_OFFLINE=1</code> to enable this behavior.</p>
<div class="course-tip"><p>Add <a href="https://huggingface.co/docs/datasets/">🤗 Datasets</a> to your offline training workflow by setting the environment variable <code>HF_DATASETS_OFFLINE=1</code>.</p></div>
<p>For example, you would typically run a program on a normal network firewalled to external instances with the following command:</p>
<pre>python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...</pre>
<p>Run this same program in an offline instance with:</p>
<pre>HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...</pre>
<p>The script should now run without hanging or waiting to timeout because it knows it should only look for local files.</p>
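<p>The same behavior can be requested from Python code. This sketch assumes the <code>local_files_only</code> argument of <code>from_pretrained()</code>, which makes the call use only files that are already in the local cache:</p>
<pre>from transformers import AutoModelForSeq2SeqLM

# raises an error instead of reaching the network if the files are not cached locally
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", local_files_only=True)</pre>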
<h3 id="fetch-models-and-tokenizers-to-use-offline">Fetch models and tokenizers to use offline</h3>
<p>Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:</p>
<ul><li><p>Download a file through the user interface on the <a href="https://huggingface.co/models">Model Hub</a> by clicking on the ↓ icon.</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png" alt="download-icon"></p></li>
<li><p>Use the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">PreTrainedModel.save_pretrained()</a> workflow:</p>
<ol><li><p>Download your files ahead of time with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a>:</p>
<pre>>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")</pre></li>
<li><p>Save your files to a specified directory with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">PreTrainedModel.save_pretrained()</a>:</p>
<pre>>>> tokenizer.save_pretrained("./your/path/bigscience_t0")
>>> model.save_pretrained("./your/path/bigscience_t0")</pre></li>
<li><p>Now when you’re offline, reload your files with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> from the specified directory:</p>
<pre>>>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("./your/path/bigscience_t0")</pre></li></ol></li>
<li><p>Programmatically download files with the <a href="https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub">huggingface_hub</a> library:</p>
<ol><li><p>Install the <code>huggingface_hub</code> library in your virtual environment:</p>
<pre>python -m pip install huggingface_hub</pre></li>
<li><p>Use the <a href="https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub"><code>hf_hub_download</code></a> function to download a file to a specific path. For example, the following command downloads the <code>config.json</code> file from the <a href="https://huggingface.co/bigscience/T0_3B">T0</a> model to your desired path:</p>
<pre>>>> from huggingface_hub import hf_hub_download
>>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")</pre></li></ol></li></ul>
<p>Once your file is downloaded and locally cached, specify its local path to load and use it:</p>
<pre>>>> from transformers import AutoConfig
>>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")</pre>
<div class="course-tip"><p>See the <a href="https://huggingface.co/docs/hub/how-to-downstream">How to download files from the Hub</a> section for more details on downloading files stored on the Hub.</p></div>
78
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/converting_tensorflow_models.html
<h1 id="converting-tensorflow-checkpoints">Converting Tensorflow Checkpoints</h1>
<p>A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models that can be loaded using the <code>from_pretrained</code> methods of the library.</p>
<div class="course-tip"><p>Since 2.3.0 the conversion script is part of the transformers CLI (<strong>transformers-cli</strong>), available in any transformers >= 2.3.0 installation.</p> <p>The documentation below reflects the <strong>transformers-cli convert</strong> command format.</p></div>
<h2 id="bert">BERT</h2>
<p>You can convert any TensorFlow checkpoint for BERT (in particular <a href="https://github.com/google-research/bert#pre-trained-models">the pre-trained models released by Google</a>) to a PyTorch save file by using the <a href="https://github.com/huggingface/transformers/tree/master/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py">convert_bert_original_tf_checkpoint_to_pytorch.py</a> script.</p>
<p>This CLI takes as input a TensorFlow checkpoint (three files starting with <code>bert_model.ckpt</code>) and the associated configuration file (<code>bert_config.json</code>), then creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint into the PyTorch model, and saves the resulting model in a standard PyTorch save file that can be imported using <code>from_pretrained()</code> (see the example in <a href="quicktour">quicktour</a>, <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification/run_glue.py">run_glue.py</a>).</p>
<p>You only need to run this conversion script <strong>once</strong> to get a PyTorch model. You can then disregard the TensorFlow checkpoint (the three files starting with <code>bert_model.ckpt</code>), but be sure to keep the configuration file (<code>bert_config.json</code>) and the vocabulary file (<code>vocab.txt</code>) as these are needed for the PyTorch model too.</p>
<p>To run this specific conversion script you will need to have TensorFlow and PyTorch installed (<code>pip install tensorflow</code>). The rest of the repository only requires PyTorch.</p>
<p>Here is an example of the conversion process for a pre-trained <code>BERT-Base Uncased</code> model:</p>
<pre>export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

transformers-cli convert --model_type bert \
  --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
  --config $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin</pre>
<p>You can download Google’s pre-trained models for the conversion <a href="https://github.com/google-research/bert#pre-trained-models">here</a>.</p>
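<p>Once the conversion has produced <code>pytorch_model.bin</code>, the checkpoint can be loaded like any other local model. This is only a sketch: it assumes the converted weights, the configuration (e.g. <code>bert_config.json</code> renamed to <code>config.json</code>) and <code>vocab.txt</code> all sit in the same directory:</p>
<pre>from transformers import BertModel, BertTokenizer

# hypothetical local directory containing the converted files
model_dir = "/path/to/bert/uncased_L-12_H-768_A-12"
tokenizer = BertTokenizer.from_pretrained(model_dir)
model = BertModel.from_pretrained(model_dir)</pre>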
To run this conversion you will need to have TensorFlow and PyTorch installed.</p> <p>Here is an example of the conversion process for the pre-trained <code>ALBERT Base</code> model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> ALBERT_BASE_DIR=/path/to/albert/albert_base transformers-cli convert --model_type albert \ --tf_checkpoint <span class="hljs-variable">$ALBERT_BASE_DIR</span>/model.ckpt-best \ --config <span class="hljs-variable">$ALBERT_BASE_DIR</span>/albert_config.json \ --pytorch_dump_output <span class="hljs-variable">$ALBERT_BASE_DIR</span>/pytorch_model.bin<!-- HTML_TAG_END --></pre></div> <p>You can download Google’s pre-trained models for the conversion <a href="https://github.com/google-research/albert#pre-trained-models" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="openai-gpt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#openai-gpt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OpenAI GPT </span></h2> <p>Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint save as the same format than OpenAI pretrained model (see <a href="https://github.com/openai/finetune-transformer-lm" rel="nofollow">here</a>\ )</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights transformers-cli convert --model_type gpt \ --tf_checkpoint <span class="hljs-variable">$OPENAI_GPT_CHECKPOINT_FOLDER_PATH</span> \ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \ [--config OPENAI_GPT_CONFIG] \ [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="openai-gpt2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#openai-gpt2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OpenAI GPT-2 </span></h2> <p>Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see <a href="https://github.com/openai/gpt-2" rel="nofollow">here</a>)</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full 
left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights transformers-cli convert --model_type gpt2 \ --tf_checkpoint <span class="hljs-variable">$OPENAI_GPT2_CHECKPOINT_PATH</span> \ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \ [--config OPENAI_GPT2_CONFIG] \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="transformerxl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformerxl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Transformer-XL </span></h2> <p>Here is an example of the conversion process for a pre-trained Transformer-XL model (see <a href="https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models" rel="nofollow">here</a>)</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint transformers-cli convert --model_type transfo_xl \ --tf_checkpoint <span class="hljs-variable">$TRANSFO_XL_CHECKPOINT_FOLDER_PATH</span> \ --pytorch_dump_output <span class="hljs-variable">$PYTORCH_DUMP_OUTPUT</span> \ [--config TRANSFO_XL_CONFIG] \ [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]<!-- HTML_TAG_END 
<h2 id="xlnet">XLNet</h2>

<p>Here is an example of the conversion process for a pre-trained XLNet model:</p>

<pre><code>export XLNET_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export XLNET_CONFIG_PATH=/path/to/xlnet/config

transformers-cli convert --model_type xlnet \
  --tf_checkpoint $XLNET_CHECKPOINT_PATH \
  --config $XLNET_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]</code></pre>

<h2 id="xlm">XLM</h2>

<p>Here is an example of the conversion process for a pre-trained XLM model:</p>

<pre><code>export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

transformers-cli convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]</code></pre>

<h2 id="t5">T5</h2>

<p>Here is an example of the conversion process for a pre-trained T5 model:</p>

<pre><code>export T5=/path/to/t5/uncased_L-12_H-768_A-12

transformers-cli convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin</code></pre>
79
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_toctree.yml
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Quick tour - local: installation title: Installation - local: philosophy title: Philosophy - local: glossary title: Glossary title: Get started - sections: - local: pipeline_tutorial title: Pipelines for inference - local: autoclass_tutorial title: Load pretrained instances with an AutoClass - local: preprocessing title: Preprocess - local: task_summary title: Summary of the tasks - local: model_summary title: Summary of the models - local: training title: Fine-tuning a pretrained model - local: accelerate title: Distributed training with 🤗 Accelerate - local: model_sharing title: Share a model - local: tokenizer_summary title: Summary of the tokenizers - local: multilingual title: Multi-lingual models title: Tutorials - sections: - local: create_a_model title: Create a custom model - local: multilingual title: Inference for multilingual models - local: troubleshooting title: Troubleshoot - local: custom_datasets title: Fine-tuning with custom datasets - sections: - local: tasks/sequence_classification title: Text classification - local: tasks/token_classification title: Token classification - local: tasks/question_answering title: Question answering - local: tasks/language_modeling title: Language modeling - local: tasks/translation title: Translation - local: tasks/summarization title: Summarization - local: tasks/multiple_choice title: Multiple choice - local: tasks/audio_classification title: Audio classification - local: tasks/asr title: Automatic speech recognition - local: tasks/image_classification title: Image classification title: Fine-tune for downstream tasks - local: run_scripts title: Train with a script - local: notebooks title: "🤗 Transformers Notebooks" - local: sagemaker title: Run training on Amazon SageMaker - local: community title: Community - local: converting_tensorflow_models title: Converting Tensorflow Checkpoints - local: migration title: Migrating from previous packages - local: contributing title: How to contribute to transformers? - local: add_new_model title: "How to add a model to 🤗 Transformers?" - local: add_new_pipeline title: "How to add a pipeline to 🤗 Transformers?" 
- local: fast_tokenizers title: "Using tokenizers from 🤗 Tokenizers" - local: performance title: 'Performance and Scalability: How To Fit a Bigger Model and Train It Faster' - local: parallelism title: Model Parallelism - local: testing title: Testing - local: debugging title: Debugging - local: serialization title: Exporting 🤗 Transformers models - local: custom_models title: Sharing custom models - local: pr_checks title: Checks on a Pull Request title: How-to guides - sections: - local: bertology title: BERTology - local: perplexity title: Perplexity of fixed-length models - local: benchmarks title: Benchmarks title: Research - sections: - sections: - local: main_classes/callback title: Callbacks - local: main_classes/configuration title: Configuration - local: main_classes/data_collator title: Data Collator - local: main_classes/keras_callbacks title: Keras callbacks - local: main_classes/logging title: Logging - local: main_classes/model title: Models - local: main_classes/text_generation title: Text Generation - local: main_classes/onnx title: ONNX - local: main_classes/optimizer_schedules title: Optimization - local: main_classes/output title: Model outputs - local: main_classes/pipelines title: Pipelines - local: main_classes/processors title: Processors - local: main_classes/tokenizer title: Tokenizer - local: main_classes/trainer title: Trainer - local: main_classes/deepspeed title: DeepSpeed Integration - local: main_classes/feature_extractor title: Feature Extractor title: Main Classes - sections: - local: model_doc/albert title: ALBERT - local: model_doc/auto title: Auto Classes - local: model_doc/bart title: BART - local: model_doc/barthez title: BARThez - local: model_doc/bartpho title: BARTpho - local: model_doc/beit title: BEiT - local: model_doc/bert title: BERT - local: model_doc/bertweet title: Bertweet - local: model_doc/bert-generation title: BertGeneration - local: model_doc/bert-japanese title: BertJapanese - local: model_doc/big_bird title: BigBird - local: model_doc/bigbird_pegasus title: BigBirdPegasus - local: model_doc/blenderbot title: Blenderbot - local: model_doc/blenderbot-small title: Blenderbot Small - local: model_doc/bort title: BORT - local: model_doc/byt5 title: ByT5 - local: model_doc/camembert title: CamemBERT - local: model_doc/canine title: CANINE - local: model_doc/convnext title: ConvNeXT - local: model_doc/clip title: CLIP - local: model_doc/convbert title: ConvBERT - local: model_doc/cpm title: CPM - local: model_doc/ctrl title: CTRL - local: model_doc/data2vec title: Data2Vec - local: model_doc/deberta title: DeBERTa - local: model_doc/deberta-v2 title: DeBERTa-v2 - local: model_doc/deit title: DeiT - local: model_doc/detr title: DETR - local: model_doc/dialogpt title: DialoGPT - local: model_doc/distilbert title: DistilBERT - local: model_doc/dit title: DiT - local: model_doc/dpr title: DPR - local: model_doc/electra title: ELECTRA - local: model_doc/encoder-decoder title: Encoder Decoder Models - local: model_doc/flaubert title: FlauBERT - local: model_doc/fnet title: FNet - local: model_doc/fsmt title: FSMT - local: model_doc/funnel title: Funnel Transformer - local: model_doc/herbert title: HerBERT - local: model_doc/ibert title: I-BERT - local: model_doc/imagegpt title: ImageGPT - local: model_doc/layoutlm title: LayoutLM - local: model_doc/layoutlmv2 title: LayoutLMV2 - local: model_doc/layoutxlm title: LayoutXLM - local: model_doc/led title: LED - local: model_doc/longformer title: Longformer - local: model_doc/luke title: LUKE - 
local: model_doc/lxmert title: LXMERT - local: model_doc/marian title: MarianMT - local: model_doc/maskformer title: MaskFormer - local: model_doc/m2m_100 title: M2M100 - local: model_doc/mbart title: MBart and MBart-50 - local: model_doc/megatron-bert title: MegatronBERT - local: model_doc/megatron_gpt2 title: MegatronGPT2 - local: model_doc/mluke title: MLUKE - local: model_doc/mobilebert title: MobileBERT - local: model_doc/mluke title: mLUKE - local: model_doc/mpnet title: MPNet - local: model_doc/mt5 title: MT5 - local: model_doc/nystromformer title: Nyströmformer - local: model_doc/openai-gpt title: OpenAI GPT - local: model_doc/gpt2 title: OpenAI GPT2 - local: model_doc/gptj title: GPT-J - local: model_doc/gpt_neo title: GPT Neo - local: model_doc/hubert title: Hubert - local: model_doc/perceiver title: Perceiver - local: model_doc/pegasus title: Pegasus - local: model_doc/phobert title: PhoBERT - local: model_doc/plbart title: PLBart - local: model_doc/poolformer title: PoolFormer - local: model_doc/prophetnet title: ProphetNet - local: model_doc/qdqbert title: QDQBert - local: model_doc/rag title: RAG - local: model_doc/realm title: REALM - local: model_doc/reformer title: Reformer - local: model_doc/rembert title: RemBERT - local: model_doc/retribert title: RetriBERT - local: model_doc/roberta title: RoBERTa - local: model_doc/roformer title: RoFormer - local: model_doc/segformer title: SegFormer - local: model_doc/sew title: SEW - local: model_doc/sew-d title: SEW-D - local: model_doc/speech-encoder-decoder title: Speech Encoder Decoder Models - local: model_doc/speech_to_text title: Speech2Text - local: model_doc/speech_to_text_2 title: Speech2Text2 - local: model_doc/splinter title: Splinter - local: model_doc/squeezebert title: SqueezeBERT - local: model_doc/swin title: Swin Transformer - local: model_doc/t5 title: T5 - local: model_doc/t5v1.1 title: T5v1.1 - local: model_doc/tapas title: TAPAS - local: model_doc/transfo-xl title: Transformer XL - local: model_doc/trocr title: TrOCR - local: model_doc/unispeech title: UniSpeech - local: model_doc/unispeech-sat title: UniSpeech-SAT - local: model_doc/vilt title: ViLT - local: model_doc/vision-encoder-decoder title: Vision Encoder Decoder Models - local: model_doc/vision-text-dual-encoder title: Vision Text Dual Encoder - local: model_doc/vit title: Vision Transformer (ViT) - local: model_doc/vit_mae title: ViTMAE - local: model_doc/visual_bert title: VisualBERT - local: model_doc/wav2vec2 title: Wav2Vec2 - local: model_doc/wav2vec2_phoneme title: Wav2Vec2Phoneme - local: model_doc/wavlm title: WavLM - local: model_doc/xglm title: XGLM - local: model_doc/xlm title: XLM - local: model_doc/xlm-prophetnet title: XLM-ProphetNet - local: model_doc/xlm-roberta title: XLM-RoBERTa - local: model_doc/xlm-roberta-xl title: XLM-RoBERTa-XL - local: model_doc/xlnet title: XLNet - local: model_doc/xlsr_wav2vec2 title: XLSR-Wav2Vec2 - local: model_doc/xls_r title: XLS-R - local: model_doc/yoso title: YOSO title: Models - sections: - local: internal/modeling_utils title: Custom Layers and Utilities - local: internal/pipelines_utils title: Utilities for pipelines - local: internal/tokenization_utils title: Utilities for Tokenizers - local: internal/trainer_utils title: Utilities for Trainer - local: internal/generation_utils title: Utilities for Generation - local: internal/file_utils title: General Utilities title: Internal Helpers title: API
80
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/autoclass_tutorial.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;load-pretrained-instances-with-an-autoclass&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;autotokenizer&quot;,&quot;title&quot;:&quot;AutoTokenizer&quot;},{&quot;local&quot;:&quot;autofeatureextractor&quot;,&quot;title&quot;:&quot;AutoFeatureExtractor&quot;},{&quot;local&quot;:&quot;autoprocessor&quot;,&quot;title&quot;:&quot;AutoProcessor&quot;},{&quot;local&quot;:&quot;automodel&quot;,&quot;title&quot;:&quot;AutoModel&quot;}],&quot;title&quot;:&quot;Load pretrained instances with an AutoClass&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/autoclass_tutorial.mdx-256b03d8.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="load-pretrained-instances-with-an-autoclass" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-pretrained-instances-with-an-autoclass"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load pretrained instances with an AutoClass </span></h1> <p>With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an <code>AutoClass</code> automatically infer and load the correct architecture from a given checkpoint. The <code>from_pretrained</code> method lets you quickly load a pretrained model for any architecture so you don’t have to devote time and resources to train a model from scratch. 
Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a> is an architecture, while <code>bert-base-uncased</code> is a checkpoint. Model is a general term that can mean either architecture or checkpoint.</p></div> <p>In this tutorial, learn to:</p> <ul><li>Load a pretrained tokenizer.</li> <li>Load a pretrained feature extractor.</li> <li>Load a pretrained processor.</li> <li>Load a pretrained model.</li></ul> <h2 class="relative group"><a id="autotokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autotokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoTokenizer </span></h2> <p>Nearly every NLP task begins with a tokenizer. 
<p>Load a tokenizer with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer.from_pretrained">AutoTokenizer.from_pretrained()</a>:</p>

<pre><code>>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")</code></pre>

<p>Then tokenize your input as shown below:</p>

<pre><code>>>> sequence = "In a hole in the ground there lived a hobbit."
>>> print(tokenizer(sequence))
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}</code></pre>

<h2 id="autofeatureextractor">AutoFeatureExtractor</h2>

<p>For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format.</p>

<p>Load a feature extractor with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoFeatureExtractor.from_pretrained">AutoFeatureExtractor.from_pretrained()</a>:</p>

<pre><code>>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
...     "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )</code></pre>

<h2 id="autoprocessor">AutoProcessor</h2>

<p>Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the <a href="model_doc/layoutlmv2">LayoutLMV2</a> model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both of them.</p>
<p>Load a processor with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoProcessor.from_pretrained">AutoProcessor.from_pretrained()</a>:</p>

<pre><code>>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")</code></pre>

<h2 id="automodel">AutoModel</h2>

<p>Finally, the <code>AutoModelFor</code> classes let you load a pretrained model for a given task (see <a href="model_doc/auto">here</a> for a complete list of available tasks). For example, load a model for sequence classification with <code>AutoModelForSequenceClassification.from_pretrained()</code>:</p>

<pre><code>>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")</code></pre>

<p>Easily reuse the same checkpoint to load an architecture for a different task:</p>

<pre><code>>>> from transformers import AutoModelForTokenClassification

>>> model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")</code></pre>

<p>Generally, we recommend using the <code>AutoTokenizer</code> class and the <code>AutoModelFor</code> class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next <a href="preprocessing">tutorial</a>, learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning.</p>
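<p>To tie the pieces together, here is a minimal sketch (not part of the original tutorial) that feeds the tokenizer output into a sequence classification model. It assumes the same <code>distilbert-base-uncased</code> checkpoint for both the tokenizer and the model; the classification head is freshly initialized, so the logits are only illustrative until the model is fine-tuned.</p>

<pre><code>>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

>>> # Tokenize a sentence and run a forward pass without tracking gradients.
>>> inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> logits.shape
torch.Size([1, 2])</code></pre>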
81
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/testing.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;testing&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-transformers-are-tested&quot;,&quot;title&quot;:&quot;How transformers are tested&quot;},{&quot;local&quot;:&quot;running-tests&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;choosing-which-tests-to-run&quot;,&quot;title&quot;:&quot;Choosing which tests to run&quot;},{&quot;local&quot;:&quot;getting-the-list-of-all-tests&quot;,&quot;title&quot;:&quot;Getting the list of all tests&quot;},{&quot;local&quot;:&quot;run-a-specific-test-module&quot;,&quot;title&quot;:&quot;Run a specific test module&quot;},{&quot;local&quot;:&quot;run-specific-tests&quot;,&quot;title&quot;:&quot;Run specific tests&quot;},{&quot;local&quot;:&quot;run-only-modified-tests&quot;,&quot;title&quot;:&quot;Run only modified tests&quot;},{&quot;local&quot;:&quot;automatically-rerun-failed-tests-on-source-modification&quot;,&quot;title&quot;:&quot;Automatically rerun failed tests on source modification&quot;},{&quot;local&quot;:&quot;skip-a-test-module&quot;,&quot;title&quot;:&quot;Skip a test module&quot;},{&quot;local&quot;:&quot;clearing-state&quot;,&quot;title&quot;:&quot;Clearing state&quot;},{&quot;local&quot;:&quot;running-tests-in-parallel&quot;,&quot;title&quot;:&quot;Running tests in parallel&quot;},{&quot;local&quot;:&quot;test-order-and-repetition&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;repeat-tests&quot;,&quot;title&quot;:&quot;Repeat tests&quot;},{&quot;local&quot;:&quot;run-tests-in-a-random-order&quot;,&quot;title&quot;:&quot;Run tests in a random order&quot;}],&quot;title&quot;:&quot;Test order and repetition&quot;},{&quot;local&quot;:&quot;look-and-feel-variations&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pytestsugar&quot;,&quot;title&quot;:&quot;pytest-sugar&quot;},{&quot;local&quot;:&quot;report-each-subtest-name-and-its-progress&quot;,&quot;title&quot;:&quot;Report each sub-test name and its progress&quot;},{&quot;local&quot;:&quot;instantly-shows-failed-tests&quot;,&quot;title&quot;:&quot;Instantly shows failed tests&quot;}],&quot;title&quot;:&quot;Look and feel variations&quot;},{&quot;local&quot;:&quot;to-gpu-or-not-to-gpu&quot;,&quot;title&quot;:&quot;To GPU or not to GPU&quot;},{&quot;local&quot;:&quot;distributed-training&quot;,&quot;title&quot;:&quot;Distributed training&quot;},{&quot;local&quot;:&quot;output-capture&quot;,&quot;title&quot;:&quot;Output capture&quot;},{&quot;local&quot;:&quot;color-control&quot;,&quot;title&quot;:&quot;Color control&quot;},{&quot;local&quot;:&quot;sending-test-report-to-online-pastebin-service&quot;,&quot;title&quot;:&quot;Sending test report to online pastebin service&quot;}],&quot;title&quot;:&quot;Running tests&quot;},{&quot;local&quot;:&quot;writing-tests&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;parametrization&quot;,&quot;title&quot;:&quot;Parametrization&quot;},{&quot;local&quot;:&quot;files-and-directories&quot;,&quot;title&quot;:&quot;Files and directories&quot;},{&quot;local&quot;:&quot;temporary-files-and-directories&quot;,&quot;title&quot;:&quot;Temporary files and directories&quot;},{&quot;local&quot;:&quot;temporary-syspath-override&quot;,&quot;title&quot;:&quot;Temporary sys.path override&quot;},{&quot;local&quot;:&quot;skipping-tests&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;implementation&quot;,&quot;title&quot;:&quot;Implementation&quot;}],&quot;title&quot;:&quot;Skipping 
tests&quot;},{&quot;local&quot;:&quot;slow-tests&quot;,&quot;title&quot;:&quot;Slow tests&quot;},{&quot;local&quot;:&quot;testing-the-stdoutstderr-output&quot;,&quot;title&quot;:&quot;Testing the stdout/stderr output&quot;},{&quot;local&quot;:&quot;capturing-logger-stream&quot;,&quot;title&quot;:&quot;Capturing logger stream&quot;},{&quot;local&quot;:&quot;testing-with-environment-variables&quot;,&quot;title&quot;:&quot;Testing with environment variables&quot;},{&quot;local&quot;:&quot;getting-reproducible-results&quot;,&quot;title&quot;:&quot;Getting reproducible results&quot;},{&quot;local&quot;:&quot;debugging-tests&quot;,&quot;title&quot;:&quot;Debugging tests&quot;}],&quot;title&quot;:&quot;Writing tests&quot;},{&quot;local&quot;:&quot;working-with-github-actions-workflows&quot;,&quot;title&quot;:&quot;Working with github actions workflows&quot;},{&quot;local&quot;:&quot;testing-experimental-ci-features&quot;,&quot;title&quot;:&quot;Testing Experimental CI Features&quot;}],&quot;title&quot;:&quot;Testing&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/testing.mdx-614baedb.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="testing" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#testing"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Testing </span></h1> <p>Let’s take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones.</p> <p>There are 2 test suites in the repository:</p> <ol><li><code>tests</code> — tests for the general API</li> <li><code>examples</code> — tests primarily for various applications that aren’t part of the API</li></ol> <h2 class="relative group"><a id="how-transformers-are-tested" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-transformers-are-tested"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How transformers are tested </span></h2> <ol><li><p>Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this <a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/.circleci/config.yml" rel="nofollow">config file</a>, so that if needed you can reproduce the same environment on your machine.</p> <p>These CI jobs don’t run <code>@slow</code> tests.</p></li> <li><p>There are 3 jobs run by <a href="https://github.com/huggingface/transformers/actions" rel="nofollow">github actions</a>:</p> <ul><li><p><a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/github-torch-hub.yml" rel="nofollow">torch hub integration</a>: checks whether torch hub integration works.</p></li> <li><p><a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/self-push.yml" rel="nofollow">self-hosted (push)</a>: runs fast tests on GPU only on commits on <code>master</code>. 
It only runs if a commit on <code>master</code> has updated the code in one of the following folders: <code>src</code>, <code>tests</code>, <code>.github</code> (to prevent running on added model cards, notebooks, etc.)</p></li> <li><p><a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/.github/workflows/self-scheduled.yml" rel="nofollow">self-hosted runner</a>: runs normal and slow tests on GPU in <code>tests</code> and <code>examples</code>:</p></li></ul></li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/<!-- HTML_TAG_END --></pre></div> <p>The results can be observed <a href="https://github.com/huggingface/transformers/actions" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="running-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#running-tests"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Running tests </span></h2> <h3 class="relative group"><a id="choosing-which-tests-to-run" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#choosing-which-tests-to-run"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0z" fill="currentColor"></path></svg></span></a> <span>Choosing which tests to run </span></h3> <p>This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them <a href="https://docs.pytest.org/en/latest/usage.html" rel="nofollow">here</a>.</p> <p>Here are some of the most useful ways of running tests.</p> <p>Run all:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pytest<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->make <span class="hljs-built_in">test</span><!-- HTML_TAG_END --></pre></div> <p>Note that the latter is defined as:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->python -m pytest -n auto --dist=loadfile -s -v ./tests/<!-- HTML_TAG_END --></pre></div> <p>which tells pytest to:</p> <ul><li>run as many test processes as there are CPU cores (which could be too many if you don’t have a ton of RAM!)</li> <li>ensure that all tests from the same file will be run by the same test process</li> <li>do not capture output</li> <li>run in verbose mode</li></ul> <h3 class="relative group"><a id="getting-the-list-of-all-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#getting-the-list-of-all-tests"></a> <span>Getting the list of all tests </span></h3> <p>All tests of the test suite:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4
border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --collect-only -q<!-- HTML_TAG_END --></pre></div> <p>All tests of a given test file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_optimization.py --collect-only -q<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="run-a-specific-test-module" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-a-specific-test-module"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Run a specific test module </span></h3> <p>To run an individual test module:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full 
transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_logging.py<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="run-specific-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-specific-tests"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Run specific tests </span></h3> <p>Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. For example, it could be:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_optimization.py::OptimizationTest::test_adam_w<!-- HTML_TAG_END --></pre></div> <p>Here:</p> <ul><li><code>tests/test_optimization.py</code> - the file with tests</li> <li><code>OptimizationTest</code> - the name of the class</li> <li><code>test_adam_w</code> - the name of the specific test function</li></ul> <p>If the file contains multiple classes, you can choose to run only tests of a given class. 
For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_optimization.py::OptimizationTest<!-- HTML_TAG_END --></pre></div> <p>will run all the tests inside that class.</p> <p>As mentioned earlier you can see what tests are contained inside the <code>OptimizationTest</code> class by running:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_optimization.py::OptimizationTest --collect-only -q<!-- HTML_TAG_END --></pre></div> <p>You can run tests by keyword expressions.</p> <p>To run only tests whose name contains <code>adam</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest -k adam tests/test_optimization.py<!-- HTML_TAG_END --></pre></div> <p>Logical <code>and</code> and <code>or</code> can be used to indicate whether all keywords should match or either. <code>not</code> can be used to negate.</p> <p>To run all tests except those whose name contains <code>adam</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;not adam&quot;</span> tests/test_optimization.py<!-- HTML_TAG_END --></pre></div> <p>And you can combine the two patterns in one:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;ada and not adam&quot;</span> tests/test_optimization.py<!-- HTML_TAG_END --></pre></div> 
<p>For example, to run both <code>test_adafactor</code> and <code>test_adam_w</code> you can use:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;test_adafactor or test_adam_w&quot;</span> tests/test_optimization.py<!-- HTML_TAG_END --></pre></div> <p>Note that we use <code>or</code> here, since we want either of the keywords to match in order to include both tests.</p> <p>If you want to include only tests that match both patterns, use <code>and</code>:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;test and ada&quot;</span> tests/test_optimization.py<!-- HTML_TAG_END --></pre></div>
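<p>To make the selection syntax above concrete, here is a minimal, hypothetical test file (the file name and test names are made up for illustration and are not actual tests from the repository). With these names, <code>-k "ada and not adam"</code> selects only <code>test_adafactor</code>, while the node id <code>tests/test_optimization_demo.py::OptimizationDemoTest::test_adam_w</code> selects a single test method:</p> <div class="code-block relative"><pre># hypothetical file: tests/test_optimization_demo.py
import unittest


class OptimizationDemoTest(unittest.TestCase):
    def test_adafactor(self):
        # name contains "ada" but not "adam" -> selected by -k "ada and not adam"
        self.assertTrue(True)

    def test_adam_w(self):
        # name contains both "ada" and "adam" -> excluded by that expression
        self.assertTrue(True)

    def test_lr_schedule(self):
        # name matches neither keyword -> excluded as well
        self.assertTrue(True)</pre></div> <p>With such a file, <code>pytest tests/test_optimization_demo.py --collect-only -q</code> would list the three test ids above, and the <code>-k</code> expressions from this section select among them by name.</p>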
<h3 class="relative group"><a id="run-only-modified-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-only-modified-tests"></a> <span>Run only modified tests </span></h3> <p>You can run the tests related to the unstaged files or the current branch (according to Git) by using <a href="https://github.com/anapaulagomes/pytest-picked" rel="nofollow">pytest-picked</a>. This is a great way of quickly checking that your changes didn’t break anything, since it won’t run the tests related to files you didn’t touch.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pip install pytest-picked<!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pytest --picked<!-- HTML_TAG_END --></pre></div> <p>All tests will be run from files and folders which are modified, but not yet committed.</p> <h3 class="relative group"><a id="automatically-rerun-failed-tests-on-source-modification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automatically-rerun-failed-tests-on-source-modification"></a> <span>Automatically rerun failed tests on source modification </span></h3> <p><a href="https://github.com/pytest-dev/pytest-xdist" rel="nofollow">pytest-xdist</a> provides a very useful feature: it detects all failed tests, then waits for you to modify files and continuously re-runs those failing tests until they pass, so you don’t need to restart pytest while you fix them. This is repeated until all tests pass, after which a full run is performed again.</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pip install pytest-xdist<!-- HTML_TAG_END --></pre></div> <p>To enter the mode: <code>pytest -f</code> or <code>pytest --looponfail</code></p> <p>File changes are detected by looking at <code>looponfailroots</code> root directories and all of their contents (recursively). If the default for this value does not work for you, you can change it in your project by setting a configuration option in <code>setup.cfg</code>:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START --><span class="hljs-section">[tool:pytest]</span>
<span class="hljs-attr">looponfailroots</span> = transformers tests<!-- HTML_TAG_END --></pre></div> <p>or <code>pytest.ini</code>/<code>tox.ini</code> files:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START --><span class="hljs-section">[pytest]</span>
<span class="hljs-attr">looponfailroots</span> = transformers tests<!-- HTML_TAG_END --></pre></div> <p>This would lead to only looking for file changes in the respective directories, specified relative to the ini-file’s directory.</p> <p><a href="https://github.com/joeyespo/pytest-watch" rel="nofollow">pytest-watch</a> is an alternative implementation of this functionality.</p> <h3 class="relative group"><a id="skip-a-test-module" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#skip-a-test-module"></a> <span>Skip a test module </span></h3> <p>If you want to run all test modules except a few, you can exclude them by giving an explicit list of tests to run. For example, to run all except <code>test_modeling_*.py</code> tests:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pytest $(<span class="hljs-built_in">ls</span> -1 tests/*py | grep -v test_modeling)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="clearing-state" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#clearing-state"></a> <span>Clearing state </span></h3> <p>On CI builds, and when isolation is important (at the expense of speed), the cache should be cleared:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out
opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --cache-clear tests<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="running-tests-in-parallel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#running-tests-in-parallel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Running tests in parallel </span></h3> <p>As mentioned earlier <code>make test</code> runs tests in parallel via <code>pytest-xdist</code> plugin (<code>-n X</code> argument, e.g. <code>-n 2</code> to run 2 parallel jobs).</p> <p><code>pytest-xdist</code>’s <code>--dist=</code> option allows one to control how the tests are grouped. 
<code>--dist=loadfile</code> puts the tests located in one file onto the same process.</p> <p>Since the order of executed tests is different and unpredictable, if running the test suite with <code>pytest-xdist</code> produces failures (meaning we have some undetected coupled tests), use <a href="https://github.com/ESSS/pytest-replay" rel="nofollow">pytest-replay</a> to replay the tests in the same order, which should help narrow that failing sequence down to a minimum.</p> <h3 class="relative group"><a id="test-order-and-repetition" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#test-order-and-repetition"></a> <span>Test order and repetition </span></h3> <p>It’s good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). Straightforward multiple repetition is also useful for detecting problems that only get uncovered by the randomness of deep learning.</p>
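<p>As an illustration of the kind of problem plain repetition can expose, here is a minimal, hypothetical test that only fails occasionally because it depends on unseeded randomness (the file name and threshold are made up for the example; real tests would seed their generators or use tolerances):</p> <div class="code-block relative"><pre># hypothetical file: tests/test_flaky_demo.py
import random
import unittest


class FlakyDemoTest(unittest.TestCase):
    def test_noise_is_small(self):
        # fails only on the rare draws above the threshold,
        # so a single run usually passes while repeated runs eventually fail
        noise = random.gauss(0.0, 1.0)
        self.assertLess(abs(noise), 3.0)</pre></div> <p>Repeating such a test many times (for example with pytest-flakefinder below) makes the intermittent failure reproducible enough to debug.</p>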
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install pytest-flakefinder<!-- HTML_TAG_END --></pre></div> <p>And then run every test multiple times (50 by default):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --flake-finder --flake-runs=5 tests/test_failing_test.py<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This plugin doesn’t work with <code>-n</code> flag from <code>pytest-xdist</code>.</p></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>There is another plugin <code>pytest-repeat</code>, but it doesn’t work with <code>unittest</code>.</p></div> <h4 class="relative group"><a id="run-tests-in-a-random-order" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-tests-in-a-random-order"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
<h4 class="relative group"><a id="run-tests-in-a-random-order" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-tests-in-a-random-order"></a> <span>Run tests in a random order </span></h4> <div class="code-block relative"><pre><!-- HTML_TAG_START -->pip install pytest-random-order<!-- HTML_TAG_END --></pre></div> <p>Important: the presence of <code>pytest-random-order</code> will automatically randomize tests; no configuration change or command line option is required.</p> <p>As explained earlier, this allows detection of coupled tests - where one test’s state affects the state of another. When <code>pytest-random-order</code> is installed it will print the random seed it used for that session, e.g.:</p>
<pre>pytest tests
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663</pre>
<p>So if a particular sequence fails, you can reproduce it by adding that exact seed, e.g.:</p>
<pre>pytest --random-order-seed=573663
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663</pre>
<p>It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start manually narrowing down the list, you can no longer rely on the seed; instead, list the tests manually in the exact order they failed and tell pytest not to randomize them, using <code>--random-order-bucket=none</code>, e.g.:</p>
<pre>pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py</pre>
<p>To disable the shuffling for all tests:</p>
class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --random-order-bucket=none<!-- HTML_TAG_END --></pre></div> <p>By default <code>--random-order-bucket=module</code> is implied, which will shuffle the files on the module levels. It can also shuffle on <code>class</code>, <code>package</code>, <code>global</code> and <code>none</code> levels. For the complete details please see its <a href="https://github.com/jbasko/pytest-random-order" rel="nofollow">documentation</a>.</p> <p>Another randomization alternative is: <a href="https://github.com/pytest-dev/pytest-randomly" rel="nofollow"><code>pytest-randomly</code></a>. This module has a very similar functionality/interface, but it doesn’t have the bucket modes available in <code>pytest-random-order</code>. It has the same problem of imposing itself once installed.</p> <h3 class="relative group"><a id="look-and-feel-variations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#look-and-feel-variations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Look and feel variations </span></h3> <h4 class="relative group"><a id="pytestsugar" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pytestsugar"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
<h4 id="pytestsugar">pytest-sugar</h4>
<p><a href="https://github.com/Frozenball/pytest-sugar" rel="nofollow">pytest-sugar</a> is a plugin that improves the look-and-feel, adds a progress bar, and shows failing tests and the assertion instantly. It gets activated automatically upon installation.</p>
<pre>pip install pytest-sugar</pre>
<p>To run tests without it, run:</p>
<pre>pytest -p no:sugar</pre>
<p>or uninstall it.</p>
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Report each sub-test name and its progress </span></h4> <p>For a single or a group of tests via <code>pytest</code> (after <code>pip install pytest-pspec</code>):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --pspec tests/test_optimization.py<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="instantly-shows-failed-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#instantly-shows-failed-tests"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Instantly shows failed tests </span></h4> <p><a href="https://github.com/pytest-dev/pytest-instafail" rel="nofollow">pytest-instafail</a> shows failures and errors instantly instead of waiting until the end of test session.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 
" title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install pytest-instafail<!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest --instafail<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="to-gpu-or-not-to-gpu" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#to-gpu-or-not-to-gpu"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>To GPU or not to GPU </span></h3> <p>On a GPU-enabled setup, to test in CPU-only mode add <code>CUDA_VISIBLE_DEVICES=&quot;&quot;</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative 
<pre>CUDA_VISIBLE_DEVICES="" pytest tests/test_logging.py</pre>
<p>or if you have multiple GPUs, you can specify which one is to be used by <code>pytest</code>. For example, to use only the second GPU if you have GPUs <code>0</code> and <code>1</code>, you can run:</p>
<pre>CUDA_VISIBLE_DEVICES="1" pytest tests/test_logging.py</pre>
<p>This is handy when you want to run different tasks on different GPUs.</p>
<p>Some tests must be run on CPU-only, others on either CPU or GPU or TPU, yet others on multiple GPUs. The following skip decorators are used to set the requirements of tests CPU/GPU/TPU-wise:</p>
<ul><li><code>require_torch</code> - this test will run only under torch</li> <li><code>require_torch_gpu</code> - as <code>require_torch</code> plus requires at least 1 GPU</li> <li><code>require_torch_multi_gpu</code> - as <code>require_torch</code> plus requires at least 2 GPUs</li> <li><code>require_torch_non_multi_gpu</code> - as <code>require_torch</code> plus requires 0 or 1 GPUs</li> <li><code>require_torch_up_to_2_gpus</code> - as <code>require_torch</code> plus requires 0, 1 or 2 GPUs</li> <li><code>require_torch_tpu</code> - as <code>require_torch</code> plus requires at least 1 TPU</li></ul>
<p>Let’s depict the GPU requirements in the following table:</p>
<table>
<thead><tr><th>n gpus</th><th>decorator</th></tr></thead>
<tbody>
<tr><td><code>&gt;= 0</code></td><td><code>@require_torch</code></td></tr>
<tr><td><code>&gt;= 1</code></td><td><code>@require_torch_gpu</code></td></tr>
<tr><td><code>&gt;= 2</code></td><td><code>@require_torch_multi_gpu</code></td></tr>
<tr><td><code>&lt; 2</code></td><td><code>@require_torch_non_multi_gpu</code></td></tr>
<tr><td><code>&lt; 3</code></td><td><code>@require_torch_up_to_2_gpus</code></td></tr>
</tbody>
</table>
<p>For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed:</p>
<pre>@require_torch_multi_gpu
def test_example_with_multi_gpu():</pre>
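<p>Conversely, a test that must not run on a multi-GPU machine can be marked with the corresponding decorator from the list above. The following is only an illustrative stub in the same spirit as the example above: the test name is made up, and the import assumes the decorator is exposed by <code>transformers.testing_utils</code> like the other helpers used in this document:</p>
<pre># illustrative stub; test name is hypothetical
from transformers.testing_utils import require_torch_non_multi_gpu


@require_torch_non_multi_gpu
def test_example_without_multi_gpu():
    ...</pre>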
For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">@require_tf</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_tf_thing_with_tensorflow</span>():<!-- HTML_TAG_END --></pre></div> <p>These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">@require_torch_gpu</span> <span class="hljs-meta">@slow</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_example_slow_on_gpu</span>():<!-- HTML_TAG_END --></pre></div> <p>Some decorators like <code>@parametrized</code> rewrite test names, therefore <code>@require_*</code> skip decorators have to be listed last for them to work correctly. 
<pre>@parameterized.expand(...)
@require_torch_multi_gpu
def test_integration_foo():</pre>
<p>This order problem doesn’t exist with <code>@pytest.mark.parametrize</code>: you can put it first or last and it will still work. However, it only works with non-unittest tests.</p>
<p>Inside tests:</p>
<ul><li>How many GPUs are available:</li></ul>
<pre>from transformers.testing_utils import get_gpu_count

n_gpu = get_gpu_count()  # works with torch and tf</pre>
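<p>For instance, a test can adapt its behaviour to the number of visible devices. The following is only a minimal sketch: the test class, test name and batch-size logic are made up for illustration, while <code>get_gpu_count</code> and <code>require_torch</code> come from <code>transformers.testing_utils</code> as above:</p>
<pre>import unittest

from transformers.testing_utils import get_gpu_count, require_torch


@require_torch
class DeviceCountExampleTest(unittest.TestCase):
    def test_scales_with_device_count(self):
        # hypothetical example: scale the total batch size with the number of GPUs
        n_gpu = get_gpu_count()
        per_device_batch_size = 8
        total_batch_size = per_device_batch_size * max(n_gpu, 1)
        self.assertGreaterEqual(total_batch_size, per_device_batch_size)</pre>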
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed training </span></h3> <p><code>pytest</code> can’t deal with distributed training directly. If this is attempted - the sub-processes don’t do the right thing and end up thinking they are <code>pytest</code> and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes.</p> <p>Here are some tests that use it:</p> <ul><li><a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/tests/test_trainer_distributed.py" rel="nofollow">test_trainer_distributed.py</a></li> <li><a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/tests/deepspeed/test_deepspeed.py" rel="nofollow">test_deepspeed.py</a></li></ul> <p>To jump right into the execution point, search for the <code>execute_subprocess_async</code> call in those tests.</p> <p>You will need at least 2 GPUs to see these tests in action:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="output-capture" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#output-capture"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
<h3 id="output-capture">Output capture</h3>
<p>During test execution any output sent to <code>stdout</code> and <code>stderr</code> is captured. If a test or a setup method fails, its corresponding captured output will usually be shown along with the failure traceback.</p>
<p>To disable output capturing and to get the <code>stdout</code> and <code>stderr</code> normally, use <code>-s</code> or <code>--capture=no</code>:</p>
<pre>pytest -s tests/test_logging.py</pre>
<p>To send test results to JUnit format output:</p>
<pre>py.test tests --junitxml=result.xml</pre>
<h3 id="color-control">Color control</h3>
<p>To have no color (e.g., yellow on white background is not readable):</p>
<pre>pytest --color=no tests/test_logging.py</pre>
<h3 id="sending-test-report-to-online-pastebin-service">Sending test report to online pastebin service</h3>
<p>Creating a URL for each test failure:</p>
<pre>pytest --pastebin=failed tests/test_logging.py</pre>
<p>This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add, for example, <code>-x</code> if you only want to send one particular failure.</p>
<p>Creating a URL for a whole test session log:</p>
<pre>pytest --pastebin=all tests/test_logging.py</pre>
<h2 id="writing-tests">Writing tests</h2>
<p>🤗 transformers tests are based on <code>unittest</code>, but run by <code>pytest</code>, so most of the time features from both systems can be used.</p>
<p>You can read <a href="https://docs.pytest.org/en/stable/unittest.html" rel="nofollow">here</a> which features are supported, but the important thing to remember is that most <code>pytest</code> fixtures don’t work. Nor does its parametrization, but we use the <code>parameterized</code> module, which works in a similar way.</p>
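<p>To make this concrete, here is a minimal sketch of what such a test looks like; the file and test names are made up for illustration. It is a plain <code>unittest.TestCase</code>, yet it is collected and run by <code>pytest</code>:</p>
<pre># test_addition_example.py - a hypothetical test module
import unittest


class AdditionExampleTest(unittest.TestCase):
    def test_add(self):
        # plain unittest assertion; pytest collects and runs this test as-is
        self.assertEqual(1 + 1, 2)</pre>
<p>which you would invoke as <code>pytest test_addition_example.py</code>.</p>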
<h3 id="parametrization">Parametrization</h3>
<p>Often, there is a need to run the same test multiple times, but with different arguments. It could be done from within the test, but then there is no way of running that test for just one set of arguments.</p>
<pre># test_this1.py
import math
import unittest

from parameterized import parameterized


class TestMathUnitTest(unittest.TestCase):
    @parameterized.expand(
        [
            ("negative", -1.5, -2.0),
            ("integer", 1, 1.0),
            ("large fraction", 1.6, 1),
        ]
    )
    def test_floor(self, name, input, expected):
        self.assertEqual(math.floor(input), expected)</pre>
<p>Now, by default this test will be run 3 times, each time with the last 3 arguments of <code>test_floor</code> being assigned the corresponding arguments in the parameter list.</p>
<p>You could run just the <code>negative</code> and <code>integer</code> sets of params with:</p>
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;negative and integer&quot;</span> tests/test_mytest.py<!-- HTML_TAG_END --></pre></div> <p>or all but <code>negative</code> sub-tests, with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest -k <span class="hljs-string">&quot;not negative&quot;</span> tests/test_mytest.py<!-- HTML_TAG_END --></pre></div> <p>Besides using the <code>-k</code> filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest test_this1.py --collect-only -q<!-- HTML_TAG_END --></pre></div> <p>and it will list:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex 
<pre>test_this1.py::TestMathUnitTest::test_floor_0_negative
test_this1.py::TestMathUnitTest::test_floor_1_integer
test_this1.py::TestMathUnitTest::test_floor_2_large_fraction</pre>
<p>So now you can run just 2 specific sub-tests:</p>
<pre>pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer</pre>
<p>The <a href="https://pypi.org/project/parameterized/" rel="nofollow">parameterized</a> module, which is already in the developer dependencies of <code>transformers</code>, works for both <code>unittest</code> and <code>pytest</code> tests.</p>
<p>If, however, the test is not a <code>unittest</code>, you may use <code>pytest.mark.parametrize</code> (or you may see it being used in some existing tests, mostly under <code>examples</code>).</p>
<p>Here is the same example, this time using <code>pytest</code>’s <code>parametrize</code> marker:</p>
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># test_this2.py</span> <span class="hljs-keyword">import</span> pytest <span class="hljs-meta">@pytest.mark.parametrize(<span class="hljs-params"> <span class="hljs-string">&quot;name, input, expected&quot;</span>, [ (<span class="hljs-params"><span class="hljs-string">&quot;negative&quot;</span>, -<span class="hljs-number">1.5</span>, -<span class="hljs-number">2.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;integer&quot;</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1.0</span></span>), (<span class="hljs-params"><span class="hljs-string">&quot;large fraction&quot;</span>, <span class="hljs-number">1.6</span>, <span class="hljs-number">1</span></span>), ], </span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_floor</span>(<span class="hljs-params">name, <span class="hljs-built_in">input</span>, expected</span>): assert_equal(math.floor(<span class="hljs-built_in">input</span>), expected)<!-- HTML_TAG_END --></pre></div> <p>Same as with <code>parameterized</code>, with <code>pytest.mark.parametrize</code> you can have a fine control over which sub-tests are run, if the <code>-k</code> filter doesn’t do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. 
<pre>pytest test_this2.py --collect-only -q</pre> <p>and it will list:</p> <pre>test_this2.py::test_floor[integer-1-1.0]
test_this2.py::test_floor[negative--1.5--2.0]
test_this2.py::test_floor[large fraction-1.6-1]</pre> <p>So now you can run just the specific test:</p>
<pre>pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]</pre> <p>as in the previous example.</p> <h3 id="files-and-directories">Files and directories</h3> <p>In tests we often need to know where things are relative to the current test file, and it’s not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class <code>transformers.testing_utils.TestCasePlus</code> solves this problem by sorting out all the basic paths and providing easy accessors to them:</p> <ul><li><p><code>pathlib</code> objects (all fully resolved):</p> <ul><li><code>test_file_path</code> - the current test file path, i.e. <code>__file__</code></li> <li><code>test_file_dir</code> - the directory containing the current test file</li> <li><code>tests_dir</code> - the directory of the <code>tests</code> test suite</li> <li><code>examples_dir</code> - the directory of the <code>examples</code> test suite</li> <li><code>repo_root_dir</code> - the directory of the repository</li> <li><code>src_dir</code> - the directory of <code>src</code> (i.e. where the <code>transformers</code> sub-dir resides)</li></ul></li> <li><p>stringified paths (same as above, but these return paths as strings rather than <code>pathlib</code> objects):</p> <ul><li><code>test_file_path_str</code></li> <li><code>test_file_dir_str</code></li> <li><code>tests_dir_str</code></li> <li><code>examples_dir_str</code></li> <li><code>repo_root_dir_str</code></li> <li><code>src_dir_str</code></li></ul></li></ul> <p>To start using those, all you need is to make sure that the test resides in a subclass of <code>transformers.testing_utils.TestCasePlus</code>. For example:</p>
<pre>from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_local_locations(self):
        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"</pre> <p>If you don’t need to manipulate paths via <code>pathlib</code> or you just need a path as a string, you can always invoke <code>str()</code> on the <code>pathlib</code> object or use the accessors ending with <code>_str</code>. For example:</p>
<pre>from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_stringified_locations(self):
        examples_dir = self.examples_dir_str</pre> <h3 id="temporary-files-and-directories">Temporary files and directories</h3> <p>Using unique temporary files and directories is essential for parallel test running, so that the tests won’t overwrite each other’s data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like <code>tempfile</code>, which address these needs, is essential.</p> <p>However, when debugging tests, you need to be able to see what goes into the temporary file or directory, and you want to know its exact path and not have it randomized on every test re-run.</p> <p>A helper class <code>transformers.testing_utils.TestCasePlus</code> is best used for such purposes.
It’s a sub-class of <code>unittest.TestCase</code>, so we can easily inherit from it in the test modules.</p> <p>Here is an example of its usage:</p> <pre>from transformers.testing_utils import TestCasePlus


class ExamplesTests(TestCasePlus):
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()</pre> <p>This code creates a unique temporary directory, and sets <code>tmp_dir</code> to its location.</p> <ul><li>Create a unique temporary dir:</li></ul> <pre>def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir()</pre> <p><code>tmp_dir</code> will contain the path to the created temporary dir. It will be automatically removed at the end of the test.</p>
<ul><li>Create a temporary dir of my choice, ensure it’s empty before the test starts and don’t empty it after the test.</li></ul> <pre>def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir("./xxx")</pre> <p>This is useful for debugging when you want to monitor a specific directory and want to make sure the previous tests didn’t leave any data in there.</p> <ul><li><p>You can override the default behavior by directly overriding the <code>before</code> and <code>after</code> args, leading to one of the following behaviors (see the sketch after this list):</p> <ul><li><code>before=True</code>: the temporary dir will always be cleared at the beginning of the test.</li> <li><code>before=False</code>: if the temporary dir already existed, any existing files will remain there.</li> <li><code>after=True</code>: the temporary dir will always be deleted at the end of the test.</li> <li><code>after=False</code>: the temporary dir will always be left intact at the end of the test.</li></ul></li></ul>
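<p>For instance, a test that wants a known directory cleared before it runs but preserved afterwards for inspection might combine these args roughly like this (a minimal sketch based on the args described above; the <code>./debug_dir</code> name is just an illustration):</p> <pre># sketch: start from an empty ./debug_dir, and keep its contents around after the test
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir("./debug_dir", before=True, after=False)
    # ... write files into tmp_dir here and inspect them after the test run ...</pre>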
<div class="course-tip"><p>In order to run the equivalent of <code>rm -r</code> safely, only subdirs of the project repository checkout are allowed if an explicit <code>tmp_dir</code> is used, so that by mistake no <code>/tmp</code> or similar important part of the filesystem will get nuked. I.e. please always pass paths that start with <code>./</code>.</p></div> <div class="course-tip"><p>Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise.</p></div>
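<p>As a quick illustration of that last point, nothing special is needed to get several auto-removed directories in one test (a minimal sketch, assuming the helper behaves as described above):</p> <pre>def test_whatever(self):
    tmp_dir_1 = self.get_auto_remove_tmp_dir()  # first unique temporary dir
    tmp_dir_2 = self.get_auto_remove_tmp_dir()  # second, distinct temporary dir
    # both paths are registered and both get removed when the test ends</pre>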
<h3 id="temporary-syspath-override">Temporary sys.path override</h3> <p>If you need to temporarily override <code>sys.path</code> to import from another test, for example, you can use the <code>ExtendSysPath</code> context manager. Example:</p> <pre>import os

from transformers.testing_utils import ExtendSysPath

bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon  # noqa</pre> <h3 id="skipping-tests">Skipping tests</h3> <p>This is useful when a bug is found and a new test is written, but the bug is not fixed yet. In order to be able to commit it to the main repository we need to make sure it’s skipped during <code>make test</code>.</p> <p>Methods:</p> <ul><li><p>A <strong>skip</strong> means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database).</p></li> <li><p>An <strong>xfail</strong> means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with <code>pytest.mark.xfail</code>), it’s an xpass and will be reported in the test summary.</p></li></ul> <p>One of the important differences between the two is that <code>skip</code> doesn’t run the test, and <code>xfail</code> does. So if the code that’s buggy causes some bad state that will affect other tests, do not use <code>xfail</code>.</p>
<h4 id="implementation">Implementation</h4> <ul><li>Here is how to skip a whole test unconditionally:</li></ul> <pre>@unittest.skip("this bug needs to be fixed")
def test_feature_x():</pre> <p>or via pytest:</p>
<pre>@pytest.mark.skip(reason="this bug needs to be fixed")</pre> <p>or the <code>xfail</code> way:</p> <pre>@pytest.mark.xfail
def test_feature_x():</pre> <ul><li>Here is how to skip a test based on some internal check inside the test:</li></ul> <pre>def test_feature_x():
    if not has_something():
        pytest.skip("unsupported configuration")</pre>
<p>or the whole module:</p> <pre>import pytest

if not pytest.config.getoption("--custom-flag"):
    pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)</pre> <p>or the <code>xfail</code> way:</p> <pre>def test_feature_x():
    pytest.xfail("expected to fail until bug XYZ is fixed")</pre> <ul><li>Here is how to skip all tests in a module if some import is missing:</li></ul>
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->docutils = pytest.importorskip(<span class="hljs-string">&quot;docutils&quot;</span>, minversion=<span class="hljs-string">&quot;0.3&quot;</span>)<!-- HTML_TAG_END --></pre></div> <ul><li>Skip a test based on a condition:</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">@pytest.mark.skipif(<span class="hljs-params">sys.version_info &lt; (<span class="hljs-params"><span class="hljs-number">3</span>,<span class="hljs-number">6</span></span>), reason=<span class="hljs-string">&quot;requires python3.6 or higher&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_feature_x</span>():<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
<pre>@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_feature_x():</pre> <p>or skip the whole module:</p> <pre>@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
class TestClass():
    def test_feature_x(self):</pre> <p>More details, examples and ways are <a href="https://docs.pytest.org/en/latest/skipping.html">here</a>.</p>
0-79.196z" fill="currentColor"></path></svg></span></a> <span>Slow tests </span></h3> <p>The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can’t afford waiting for an hour for the test suite to complete on CI. Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> slow <span class="hljs-meta">@slow</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_integration_foo</span>():<!-- HTML_TAG_END --></pre></div> <p>Once a test is marked as <code>@slow</code>, to run such tests set <code>RUN_SLOW=1</code> env var, e.g.:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->RUN_SLOW=1 pytest tests<!-- HTML_TAG_END --></pre></div> <p>Some decorators like <code>@parameterized</code> rewrite test names, therefore <code>@slow</code> and the rest of the skip decorators <code>@require_*</code> have to be listed last for them to work correctly. 
<pre>@parameterized.expand(...)
@slow
def test_integration_foo():</pre> <p>As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PR CI checks. So it’s possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it’s important to run the slow tests on your machine before submitting the PR.</p> <p>Here is a rough decision making mechanism for choosing which tests should be marked as slow:</p> <p>If the test is focused on one of the library’s internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it’s focused on another aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach, we should have exceptions:</p> <ul><li>All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. If you’re adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs.</li> <li>All tests that need to do a training not specifically optimized to be fast should be set to slow.</li> <li>We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to <code>@slow</code>. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as <code>@slow</code>.</li> <li>If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless.</li></ul> <p>Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights.
Such models have a very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the <code>@slow</code> tests can use large slow models to do qualitative testing. To see the use of these simply look for <em>tiny</em> models with:</p> <pre>grep tiny tests examples</pre> <p>Here is an example of a <a href="https://github.com/huggingface/transformers-doc2mdx/tree/master/scripts/fsmt/fsmt-make-tiny-model.py">script</a> that created the tiny model <a href="https://huggingface.co/stas/tiny-wmt19-en-de">stas/tiny-wmt19-en-de</a>. You can easily adjust it to your specific model’s architecture.</p>
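<p>If you just want a feel for what such a tiny checkpoint involves, here is a minimal sketch: the model class and the exact sizes are illustrative assumptions, not what the script above does, but the idea is the same - build a config with a tiny vocab and very few small layers, instantiate the model with random weights, and save it:</p> <pre>from transformers import BertConfig, BertModel

# a deliberately tiny configuration: few layers, small hidden size, small vocab
config = BertConfig(
    vocab_size=1000,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
model = BertModel(config)  # weights are randomly initialized
model.save_pretrained("tiny-random-bert-for-tests")
# model.push_to_hub("tiny-random-bert-for-tests")  # optionally upload for reuse in tests</pre>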
<p>It’s easy to measure the run-time incorrectly if for example there is an overhead of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of <code>pytest --durations=0 tests</code>).</p> <p>That report is also useful to find slow outliers that aren’t marked as such, or which need to be re-written to be fast. If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests.</p> <h3 id="testing-the-stdoutstderr-output">Testing the stdout/stderr output</h3> <p>In order to test functions that write to <code>stdout</code> and/or <code>stderr</code>, the test can access those streams using <code>pytest</code>’s <a href="https://docs.pytest.org/en/latest/capture.html">capsys system</a>. Here is how this is accomplished:</p> <pre>import sys


def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured output streams
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    sys.stderr.write(err)
    # test:
    assert msg in out
    assert msg in err</pre>
<p>And, of course, most of the time, <code>stderr</code> will come as a part of an exception, so try/except has to be used in such a case:</p> <pre>def raise_exception(msg):
    raise ValueError(msg)


def test_something_exception():
    msg = "Not a good value"
    error = ""
    try:
        raise_exception(msg)
    except Exception as e:
        error = str(e)
        assert msg in error, f"{msg} is in the exception:\n{error}"</pre> <p>Another approach to capturing stdout is via <code>contextlib.redirect_stdout</code>:</p>
class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> StringIO <span class="hljs-keyword">from</span> contextlib <span class="hljs-keyword">import</span> redirect_stdout <span class="hljs-keyword">def</span> <span class="hljs-title function_">print_to_stdout</span>(<span class="hljs-params">s</span>): <span class="hljs-built_in">print</span>(s) <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_result_and_stdout</span>(): msg = <span class="hljs-string">&quot;Hello&quot;</span> buffer = StringIO() <span class="hljs-keyword">with</span> redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() <span class="hljs-comment"># optional: if you want to replay the consumed streams:</span> sys.stdout.write(out) <span class="hljs-comment"># test:</span> <span class="hljs-keyword">assert</span> msg <span class="hljs-keyword">in</span> out<!-- HTML_TAG_END --></pre></div> <p>An important potential issue with capturing stdout is that it may contain <code>\r</code> characters that in normal <code>print</code> reset everything that has been printed so far. There is no problem with <code>pytest</code>, but with <code>pytest -s</code> these characters get included in the buffer, so to be able to have the test run with and without <code>-s</code>, you have to make an extra cleanup to the captured output, using <code>re.sub(r&#39;~.*\r&#39;, &#39;&#39;, buf, 0, re.M)</code>.</p> <p>But, then we have a helper context manager wrapper to automatically take care of it all, regardless of whether it has some <code>\r</code>’s in it or not, so it’s a simple:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStdout <span class="hljs-keyword">with</span> CaptureStdout() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stdout() <span class="hljs-built_in">print</span>(cs.out)<!-- HTML_TAG_END --></pre></div> <p>Here is a full test example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center 
relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStdout msg = <span class="hljs-string">&quot;Secret message\r&quot;</span> final = <span class="hljs-string">&quot;Hello World&quot;</span> <span class="hljs-keyword">with</span> CaptureStdout() <span class="hljs-keyword">as</span> cs: <span class="hljs-built_in">print</span>(msg + final) <span class="hljs-keyword">assert</span> cs.out == final + <span class="hljs-string">&quot;\n&quot;</span>, <span class="hljs-string">f&quot;captured: <span class="hljs-subst">{cs.out}</span>, expecting <span class="hljs-subst">{final}</span>&quot;</span><!-- HTML_TAG_END --></pre></div> <p>If you’d like to capture <code>stderr</code> use the <code>CaptureStderr</code> class instead:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStderr <span class="hljs-keyword">with</span> CaptureStderr() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stderr() <span class="hljs-built_in">print</span>(cs.err)<!-- HTML_TAG_END --></pre></div> <p>If you need to capture both streams at once, use the parent <code>CaptureStd</code> class:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureStd <span class="hljs-keyword">with</span> CaptureStd() <span class="hljs-keyword">as</span> cs: function_that_writes_to_stdout_and_stderr() <span class="hljs-built_in">print</span>(cs.err, cs.out)<!-- HTML_TAG_END --></pre></div> <p>Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context.</p> <h3 class="relative group"><a id="capturing-logger-stream" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#capturing-logger-stream"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Capturing logger stream </span></h3> <p>If you need to validate the output of a logger, you can use <code>CaptureLogger</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> logging <span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> CaptureLogger msg = <span class="hljs-string">&quot;Testing 1, 2, 3&quot;</span> logging.set_verbosity_info() logger = logging.get_logger(<span class="hljs-string">&quot;transformers.models.bart.tokenization_bart&quot;</span>) <span class="hljs-keyword">with</span> CaptureLogger(logger) <span class="hljs-keyword">as</span> cl: logger.info(msg) <span class="hljs-keyword">assert</span> cl.out, msg + <span class="hljs-string">&quot;\n&quot;</span><!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="testing-with-environment-variables" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#testing-with-environment-variables"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Testing with environment variables </span></h3> <p>If you want to test the impact of environment variables for a specific test you can use a helper decorator <code>transformers.testing_utils.mockenv</code></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> mockenv <span class="hljs-keyword">class</span> <span class="hljs-title class_">HfArgumentParserTest</span>(unittest.TestCase): <span class="hljs-meta"> @mockenv(<span class="hljs-params">TRANSFORMERS_VERBOSITY=<span class="hljs-string">&quot;error&quot;</span></span>)</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_env_override</span>(<span class="hljs-params">self</span>): env_level_str = os.getenv(<span class="hljs-string">&quot;TRANSFORMERS_VERBOSITY&quot;</span>, <span class="hljs-literal">None</span>)<!-- HTML_TAG_END --></pre></div> <p>At times an external program needs to be called, which requires setting <code>PYTHONPATH</code> in <code>os.environ</code> to include multiple local paths. A helper class <code>transformers.test_utils.TestCasePlus</code> comes to help:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.testing_utils <span class="hljs-keyword">import</span> TestCasePlus <span class="hljs-keyword">class</span> <span class="hljs-title class_">EnvExampleTest</span>(<span class="hljs-title class_ inherited__">TestCasePlus</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">test_external_prog</span>(<span class="hljs-params">self</span>): env = self.get_env() <span class="hljs-comment"># now call the external program, passing `env` to it</span><!-- HTML_TAG_END --></pre></div> <p>Depending on whether the test file was under the <code>tests</code> test suite or <code>examples</code> it’ll correctly set up <code>env[PYTHONPATH]</code> to include one of these two directories, and also the <code>src</code> directory to ensure the testing is done against the current repo, and finally with whatever <code>env[PYTHONPATH]</code> was already set to before the test was called if anything.</p> <p>This helper method creates a copy of the <code>os.environ</code> object, so the original remains intact.</p> <h3 class="relative group"><a id="getting-reproducible-results" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#getting-reproducible-results"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Getting reproducible results </span></h3> <p>In some situations you may want to remove randomness for your tests. To get identical reproducable results set, you will need to fix the seed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->seed = <span class="hljs-number">42</span> <span class="hljs-comment"># python RNG</span> <span class="hljs-keyword">import</span> random random.seed(seed) <span class="hljs-comment"># pytorch RNGs</span> <span class="hljs-keyword">import</span> torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = <span class="hljs-literal">True</span> <span class="hljs-keyword">if</span> torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) <span class="hljs-comment"># numpy RNG</span> <span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np np.random.seed(seed) <span class="hljs-comment"># tf RNG</span> tf.random.set_seed(seed)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="debugging-tests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#debugging-tests"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Debugging tests </span></h3> <p>To start a debugger at the point of the warning, do this:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pytest tests/test_logging.py -W error::UserWarning --pdb<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="working-with-github-actions-workflows" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#working-with-github-actions-workflows"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Working with github actions workflows </span></h2> <p>To trigger a self-push workflow CI job, you must:</p> <ol><li>Create a new branch on <code>transformers</code> origin (not a fork!).</li> <li>The branch name has to start with either <code>ci_</code> or <code>ci-</code> (<code>master</code> triggers it too, but we can’t do PRs on <code>master</code>). 
It also gets triggered only for specific paths: you can find the up-to-date definition, in case it has changed since this document was written, <a href="https://github.com/huggingface/transformers/blob/master/.github/workflows/self-push.yml" rel="nofollow">here</a> under <em>push:</em></li> <li>Create a PR from this branch.</li> <li>Then you can see the job appear <a href="https://github.com/huggingface/transformers/actions/workflows/self-push.yml" rel="nofollow">here</a>. It may not run right away if there is a backlog.</li></ol> <h2 class="relative group"><a id="testing-experimental-ci-features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#testing-experimental-ci-features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Testing Experimental CI Features </span></h2> <p>Testing CI features can be problematic, as it can interfere with normal CI functioning. Therefore, if a new CI feature is to be added, it should be done as follows.</p> <ol><li>Create a new dedicated job that tests what needs to be tested.</li> <li>The new job must always succeed so that it gives us a green ✓ (details below).</li> <li>Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. 
- there are so many) while monitoring the experimental job’s logs (not the overall job status, which is purposefully always green)</li> <li>When it’s clear that everything is solid, then merge the new changes into existing jobs.</li></ol> <p>That way experiments on CI functionality itself won’t interfere with the normal workflow.</p> <p>Now, how can we make the job always succeed while the new CI feature is being developed?</p> <p>Some CIs, like TravisCI, support ignore-step-failure and will report the overall job as successful, but CircleCI and Github Actions as of this writing don’t support that.</p> <p>So the following workaround can be used:</p> <ol><li><code>set +euo pipefail</code> at the beginning of the run command to suppress most potential failures in the bash script.</li> <li>The last command must be a success: <code>echo &quot;done&quot;</code> or just <code>true</code> will do.</li></ol> <p>Here is an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-bullet">-</span> <span class="hljs-attr">run:</span> <span class="hljs-attr">name:</span> <span class="hljs-string">run</span> <span class="hljs-string">CI</span> <span class="hljs-string">experiment</span> <span class="hljs-attr">command:</span> <span class="hljs-string">| set +euo pipefail echo &quot;setting run-all-despite-any-errors-mode&quot; this_command_will_fail echo &quot;but bash continues to run&quot; # emulate another failure false # but the last command must be a success echo &quot;during experiment do not remove: reporting success to CI, even if there were failures&quot;</span><!-- HTML_TAG_END --></pre></div> <p>For simple commands you could also do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->cmd_that_may_fail || <span class="hljs-literal">true</span><!-- HTML_TAG_END --></pre></div> <p>Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing <code>set +euo pipefail</code> or any other things you may have added to ensure that the experimental job doesn’t interfere with the normal CI functioning.</p> <p>This whole process would have been much easier if we only could set something like <code>allow-failure</code> for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier CircleCI and Github Actions don’t support it at the moment.</p> <p>You can vote for this feature and see where it is at at these CI-specific threads:</p> <ul><li><a href="https://github.com/actions/toolkit/issues/399" rel="nofollow">Github Actions:</a></li> <li><a href="https://ideas.circleci.com/ideas/CCI-I-344" rel="nofollow">CircleCI:</a></li></ul> <script type="module" data-hydrate="mg3iht"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="mg3iht"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/testing.mdx-614baedb.js") ], params: {} } }); </script>
82
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/notebooks.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;transformers-notebooks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;hugging-faces-notebooks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;documentation-notebooks&quot;,&quot;title&quot;:&quot;Documentation notebooks&quot;},{&quot;local&quot;:&quot;pytorch-examples&quot;,&quot;title&quot;:&quot;PyTorch Examples&quot;},{&quot;local&quot;:&quot;tensorflow-examples&quot;,&quot;title&quot;:&quot;TensorFlow Examples&quot;},{&quot;local&quot;:&quot;optimum-notebooks&quot;,&quot;title&quot;:&quot;Optimum notebooks&quot;}],&quot;title&quot;:&quot;Hugging Face&#39;s notebooks 🤗&quot;},{&quot;local&quot;:&quot;community-notebooks&quot;,&quot;title&quot;:&quot;Community notebooks:&quot;}],&quot;title&quot;:&quot;🤗 Transformers Notebooks&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/notebooks.mdx-229c9f2b.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="transformers-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>🤗 Transformers Notebooks </span></h1> <p>You can find here a list of the official notebooks provided by Hugging Face.</p> <p>Also, we would like to list here interesting content created by the community. If you wrote some notebook(s) leveraging 🤗 Transformers and would like be listed here, please open a Pull Request so it can be included under the Community notebooks. 
</p> <h2 class="relative group"><a id="hugging-faces-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#hugging-faces-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Hugging Face&#39;s notebooks 🤗 </span></h2> <h3 class="relative group"><a id="documentation-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#documentation-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Documentation notebooks </span></h3> <p>You can open any page of the documentation as a notebook in colab (there is a button directly on said pages) but they are also listed here if you need to:</p> <table><thead><tr><th align="left">Notebook</th> <th align="left">Description</th> <th align="left"></th> <th align="right"></th></tr></thead> <tbody><tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb" rel="nofollow">Quicktour of the library</a></td> <td align="left">A presentation of the various APIs in Transformers</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/quicktour.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb" rel="nofollow">Summary of the tasks</a></td> <td align="left">How to run the models of the Transformers library task by task</td> <td align="left"><a 
href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/task_summary.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb" rel="nofollow">Preprocessing data</a></td> <td align="left">How to use a tokenizer to preprocess your data</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/preprocessing.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/training.ipynb" rel="nofollow">Fine-tuning a pretrained model</a></td> <td align="left">How to use the Trainer to fine-tune a pretrained model</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb" rel="nofollow">Summary of the tokenizers</a></td> <td align="left">The differences between the tokenizers algorithm</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/tokenizer_summary.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb" rel="nofollow">Multilingual models</a></td> <td align="left">How to use the multilingual models of the library</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/multilingual.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a 
href="https://github.com/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb" rel="nofollow">Fine-tuning with custom datasets</a></td> <td align="left">How to fine-tune a pretrained model on various tasks</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr></tbody></table> <h3 class="relative group"><a id="pytorch-examples" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pytorch-examples"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PyTorch Examples </span></h3> <table><thead><tr><th align="left">Notebook</th> <th align="left">Description</th> <th align="left"></th> <th align="right"></th></tr></thead> <tbody><tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow">Train your tokenizer</a></td> <td align="left">How to train and use your very own tokenizer</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb" rel="nofollow">Train your language model</a></td> <td align="left">How to easily start using transformers</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a 
href="https://github.com/huggingface/notebooks/blob/master/examples/text_classification.ipynb" rel="nofollow">How to fine-tune a model on text classification</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on any GLUE task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb" rel="nofollow">How to fine-tune a model on language modeling</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb" rel="nofollow">How to fine-tune a model on token classification</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS).</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/question_answering.ipynb" rel="nofollow">How to fine-tune a model on question answering</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on SQUAD.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb" rel="nofollow">How to fine-tune a model on multiple choice</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on SWAG.</td> <td align="left"><a 
href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multiple_choice.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb" rel="nofollow">How to fine-tune a model on translation</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on WMT.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/translation.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/summarization.ipynb" rel="nofollow">How to fine-tune a model on summarization</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on XSUM.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/summarization.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb" rel="nofollow">How to fine-tune a speech recognition model in English</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/speech_recognition.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb" rel="nofollow">How to fine-tune a speech recognition model in any language</a></td> <td align="left">Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a 
href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multi_lingual_speech_recognition.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/audio_classification.ipynb" rel="nofollow">How to fine-tune a model on audio classification</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb" rel="nofollow">How to train a language model from scratch</a></td> <td align="left">Highlight all the steps to effectively train Transformer model on custom data</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb" rel="nofollow">How to generate text</a></td> <td align="left">How to use different decoding methods for language generation with transformers</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/onnx-export.ipynb" rel="nofollow">How to export model to ONNX</a></td> <td align="left">Highlight how to export and run inference workloads through ONNX</td> <td align="left"></td> <td align="right"></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/benchmark.ipynb" rel="nofollow">How to use Benchmarks</a></td> <td align="left">How to benchmark models with transformers</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/benchmark.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/benchmark.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" 
alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/blog/blob/master/notebooks/03_reformer.ipynb" rel="nofollow">Reformer</a></td> <td align="left">How Reformer pushes the limits of language modeling</td> <td align="left"><a href="https://colab.research.google.com/github/patrickvonplaten/blog/blob/master/notebooks/03_reformer.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/master/notebooks/03_reformer.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/image_classification.ipynb" rel="nofollow">How to fine-tune a model on image classification</a></td> <td align="left">Show how to preprocess the data and fine-tune any pretrained Vision model on Image Classification</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr></tbody></table> <h3 class="relative group"><a id="tensorflow-examples" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tensorflow-examples"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow Examples </span></h3> <table><thead><tr><th align="left">Notebook</th> <th align="left">Description</th> <th align="left"></th> <th align="right"></th></tr></thead> <tbody><tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow">Train your tokenizer</a></td> <td align="left">How to train and use your very own tokenizer</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a 
href="https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb" rel="nofollow">Train your language model</a></td> <td align="left">How to easily start using transformers</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb" rel="nofollow">How to fine-tune a model on text classification</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on any GLUE task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb" rel="nofollow">How to fine-tune a model on language modeling</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb" rel="nofollow">How to fine-tune a model on token classification</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS).</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb" rel="nofollow">How to fine-tune a model on question answering</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on SQUAD.</td> <td align="left"><a 
href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb" rel="nofollow">How to fine-tune a model on multiple choice</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on SWAG.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/multiple_choice-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/translation-tf.ipynb" rel="nofollow">How to fine-tune a model on translation</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on WMT.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb" rel="nofollow">How to fine-tune a model on summarization</a></td> <td align="left">Show how to preprocess the data and fine-tune a pretrained model on XSUM.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr></tbody></table> <h3 class="relative group"><a id="optimum-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimum-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimum notebooks </span></h3> <p>🤗 <a href="https://github.com/huggingface/optimum" rel="nofollow">Optimum</a> is an extension of 🤗 Transformers, providing a set of performance optimization tools enabling maximum efficiency to train and run models on targeted hardwares.</p> <table><thead><tr><th align="left">Notebook</th> <th align="left">Description</th> <th align="left"></th> <th align="right"></th></tr></thead> <tbody><tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb" rel="nofollow">How to quantize a model with ONNX Runtime for text classification</a></td> <td align="left">Show how to apply static and dynamic quantization on a model using <a href="https://github.com/microsoft/onnxruntime" rel="nofollow">ONNX Runtime</a> for any GLUE task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_ort.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr> <tr><td align="left"><a href="https://github.com/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb" rel="nofollow">How to quantize a model with Intel Neural Compressor for text classification</a></td> <td align="left">Show how to apply static, dynamic and aware training quantization on a model using <a href="https://github.com/intel/neural-compressor" rel="nofollow">Intel Neural Compressor (INC)</a> for any GLUE task.</td> <td align="left"><a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"></a></td> <td align="right"><a href="https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/examples/text_classification_quantization_inc.ipynb" rel="nofollow"><img src="https://studiolab.sagemaker.aws/studiolab.svg" alt="Open in AWS Studio"></a></td></tr></tbody></table> <h2 class="relative group"><a id="community-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#community-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Community notebooks: </span></h2> <p>More notebooks developed by the community are available <a href="community#community-notebooks">here</a>.</p> <script type="module" data-hydrate="1n3qsh1"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1n3qsh1"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/notebooks.mdx-229c9f2b.js") ], params: {} } }); </script>
83
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/multilingual.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;multilingual-models-for-inference&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;xlm&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;xlm-with-language-embeddings&quot;,&quot;title&quot;:&quot;XLM with language embeddings&quot;},{&quot;local&quot;:&quot;xlm-without-language-embeddings&quot;,&quot;title&quot;:&quot;XLM without language embeddings&quot;}],&quot;title&quot;:&quot;XLM&quot;},{&quot;local&quot;:&quot;bert&quot;,&quot;title&quot;:&quot;BERT&quot;},{&quot;local&quot;:&quot;xlmroberta&quot;,&quot;title&quot;:&quot;XLM-RoBERTa&quot;},{&quot;local&quot;:&quot;m2m100&quot;,&quot;title&quot;:&quot;M2M100&quot;},{&quot;local&quot;:&quot;mbart&quot;,&quot;title&quot;:&quot;MBart&quot;}],&quot;title&quot;:&quot;Multilingual models for inference&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/multilingual.mdx-0feae7c7.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/DocNotebookDropdown-ecff2a90.js"> <h1 class="relative group"><a id="multilingual-models-for-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multilingual-models-for-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multilingual models for inference </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"><div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>There are several multilingual models in 🤗 Transformers, 
and their inference usage differs from monolingual models. Not <em>all</em> multilingual model usage is different though. Some models, like <a href="https://huggingface.co/bert-base-multilingual-uncased" rel="nofollow">bert-base-multilingual-uncased</a>, can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference.</p> <h2 class="relative group"><a id="xlm" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM </span></h2> <p>XLM has ten different checkpoints, only one of which is monolingual. The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don’t.</p> <h3 class="relative group"><a id="xlm-with-language-embeddings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm-with-language-embeddings"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM with language embeddings </span></h3> <p>The following XLM models use language embeddings to specify the language used at inference:</p> <ul><li><code>xlm-mlm-ende-1024</code> (Masked language modeling, English-German)</li> <li><code>xlm-mlm-enfr-1024</code> (Masked language modeling, English-French)</li> <li><code>xlm-mlm-enro-1024</code> (Masked language modeling, English-Romanian)</li> <li><code>xlm-mlm-xnli15-1024</code> (Masked language modeling, XNLI languages)</li> <li><code>xlm-mlm-tlm-xnli15-1024</code> (Masked language modeling + translation, XNLI languages)</li> <li><code>xlm-clm-enfr-1024</code> (Causal language modeling, English-French)</li> <li><code>xlm-clm-ende-1024</code> (Causal language modeling, English-German)</li></ul> <p>Language embeddings are represented as a tensor of the same shape as the <code>input_ids</code> passed to the model. 
The values in these tensors depend on the language used and are identified by the tokenizer’s <code>lang2id</code> and <code>id2lang</code> attributes.</p> <p>In this example, load the <code>xlm-clm-enfr-1024</code> checkpoint (Causal language modeling, English-French):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The <code>lang2id</code> attribute of the tokenizer displays this model’s languages and their ids:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.lang2id) {<span class="hljs-string">&#x27;en&#x27;</span>: <span 
class="hljs-number">0</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-number">1</span>}<!-- HTML_TAG_END --></pre></div> <p>Next, create an example input:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(<span class="hljs-string">&quot;Wikipedia was used to&quot;</span>)]) <span class="hljs-comment"># batch size of 1</span><!-- HTML_TAG_END --></pre></div> <p>Set the language id as <code>&quot;en&quot;</code> and use it to define the language embedding. The language embedding is a tensor filled with <code>0</code> since that is the language id for English. This tensor should be the same size as <code>input_ids</code>. 
</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>language_id = tokenizer.lang2id[<span class="hljs-string">&quot;en&quot;</span>] <span class="hljs-comment"># 0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = torch.tensor([language_id] * input_ids.shape[<span class="hljs-number">1</span>]) <span class="hljs-comment"># torch.tensor([0, 0, 0, ..., 0])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We reshape it to be of size (batch_size, sequence_length)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = langs.view(<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>) <span class="hljs-comment"># is now of shape [1, sequence_length] (we have a batch size of 1)</span><!-- HTML_TAG_END --></pre></div> <p>Now you can pass the <code>input_ids</code> and language embedding to the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, langs=langs)<!-- HTML_TAG_END --></pre></div> <p>The <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-generation/run_generation.py" rel="nofollow">run_generation.py</a> script can 
generate text with language embeddings using the <code>xlm-clm</code> checkpoints.</p> <h3 class="relative group"><a id="xlm-without-language-embeddings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm-without-language-embeddings"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM without language embeddings </span></h3> <p>The following XLM models do not require language embeddings during inference:</p> <ul><li><code>xlm-mlm-17-1280</code> (Masked language modeling, 17 languages)</li> <li><code>xlm-mlm-100-1280</code> (Masked language modeling, 100 languages)</li></ul> <p>These models are used for generic sentence representations, unlike the previous XLM checkpoints.</p> <h2 class="relative group"><a id="bert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BERT </span></h2> <p>The following BERT models can be used for multilingual tasks:</p> <ul><li><code>bert-base-multilingual-uncased</code> (Masked language modeling + Next sentence prediction, 102 languages)</li> <li><code>bert-base-multilingual-cased</code> (Masked language modeling + Next sentence prediction, 104 languages)</li></ul> <p>These models do not require language embeddings during inference. 
They should identify the language from the context and infer accordingly.</p> <h2 class="relative group"><a id="xlmroberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlmroberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM-RoBERTa </span></h2> <p>The following XLM-RoBERTa models can be used for multilingual tasks:</p> <ul><li><code>xlm-roberta-base</code> (Masked language modeling, 100 languages)</li> <li><code>xlm-roberta-large</code> (Masked language modeling, 100 languages)</li></ul> <p>XLM-RoBERTa was trained on 2.5TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering.</p> <h2 class="relative group"><a id="m2m100" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#m2m100"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>M2M100 </span></h2> <p>The following M2M100 models can be used for multilingual translation:</p> <ul><li><code>facebook/m2m100_418M</code> (Translation)</li> <li><code>facebook/m2m100_1.2B</code> (Translation)</li></ul> <p>In this example, load the <code>facebook/m2m100_418M</code> checkpoint to translate from Chinese to English. 
You can set the source language in the tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100ForConditionalGeneration, M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chinese_text = <span class="hljs-string">&quot;不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>, src_lang=<span class="hljs-string">&quot;zh&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Tokenize the text:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_zh = tokenizer(chinese_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)<!-- HTML_TAG_END 
--></pre></div> <p>M2M100 forces the target language id as the first generated token to translate to the target language. Set the <code>forced_bos_token_id</code> to <code>en</code> in the <code>generate</code> method to translate to English:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;en&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&#x27;Do not interfere with the matters of the witches, because they are delicate and will soon be angry.&#x27;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="mbart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mbart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart </span></h2> <p>The following MBart models can be used for multilingual translation:</p> <ul><li><code>facebook/mbart-large-50-one-to-many-mmt</code> (One-to-many multilingual machine translation, 50 languages)</li> <li><code>facebook/mbart-large-50-many-to-many-mmt</code> (Many-to-many multilingual machine translation, 50 languages)</li> <li><code>facebook/mbart-large-50-many-to-one-mmt</code> (Many-to-one multilingual machine translation, 50 languages)</li> <li><code>facebook/mbart-large-50</code> (Multilingual translation, 50 languages)</li> 
<li><code>facebook/mbart-large-cc25</code></li></ul> <p>In this example, load the <code>facebook/mbart-large-50-many-to-many-mmt</code> checkpoint to translate Finnish to English. You can set the source language in the tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>fi_text = <span class="hljs-string">&quot;Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>, src_lang=<span class="hljs-string">&quot;fi_FI&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Tokenize the text:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_fi = tokenizer(fi_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>MBart forces the target language id as the first generated token to translate to the target language. Set the <code>forced_bos_token_id</code> to <code>en_XX</code> in the <code>generate</code> method to translate to English:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_fi, forced_bos_token_id=tokenizer.lang_code_to_id[<span class="hljs-string">&quot;en_XX&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Don&#x27;t interfere with the wizard&#x27;s affairs, because they are subtle, will soon get angry.&quot;</span><!-- HTML_TAG_END --></pre></div> <p>If you are using the <code>facebook/mbart-large-50-many-to-one-mmt</code> checkpoint, you don’t need to force the target language id as the first generated token; otherwise, the usage is the same.</p> <script type="module" data-hydrate="lx0noq"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="lx0noq"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/multilingual.mdx-0feae7c7.js") ], params: {} } }); </script>
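<p>As a brief, hedged sketch that is not part of the original guide: with the many-to-one checkpoint the call could look like the following. The checkpoint name, the <code>src_lang</code> argument, and the generate call mirror the snippets above; the example is illustrative only and its decoded output is not shown because it has not been verified here.</p> <pre>&gt;&gt;&gt; from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

&gt;&gt;&gt; # Illustrative sketch: translate Finnish to English with the many-to-one checkpoint.
&gt;&gt;&gt; # No forced_bos_token_id is needed because this checkpoint always generates English.
&gt;&gt;&gt; fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia."
&gt;&gt;&gt; tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt", src_lang="fi_FI")
&gt;&gt;&gt; model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
&gt;&gt;&gt; encoded_fi = tokenizer(fi_text, return_tensors="pt")
&gt;&gt;&gt; generated_tokens = model.generate(**encoded_fi)
&gt;&gt;&gt; tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)</pre>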
84
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/pipeline_tutorial.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;pipelines-for-inference&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pipeline-usage&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;choose-a-model-and-tokenizer&quot;,&quot;title&quot;:&quot;Choose a model and tokenizer&quot;}],&quot;title&quot;:&quot;Pipeline usage&quot;},{&quot;local&quot;:&quot;audio-pipeline&quot;,&quot;title&quot;:&quot;Audio pipeline&quot;},{&quot;local&quot;:&quot;vision-pipeline&quot;,&quot;title&quot;:&quot;Vision pipeline&quot;}],&quot;title&quot;:&quot;Pipelines for inference&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/pipeline_tutorial.mdx-8f24abfb.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="pipelines-for-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipelines-for-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipelines for inference </span></h1> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> makes it simple to use any model from the <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a> for inference on a variety of tasks such as text generation, image segmentation and audio classification. Even if you don’t have experience with a specific modality or understand the code powering the models, you can still use them with the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>! 
This tutorial will teach you to:</p> <ul><li>Use a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for inference.</li> <li>Use a specific tokenizer or model.</li> <li>Use a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for audio and vision tasks.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Take a look at the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> documentation for a complete list of supported tasks.</p></div> <h2 class="relative group"><a id="pipeline-usage" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-usage"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline usage </span></h2> <p>While each task has an associated <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>, it is simpler to use the general <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> abstraction which contains all the specific task pipelines. The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> automatically loads a default model and tokenizer capable of inference for your task. 
</p> <ol><li>Start by creating a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> and specify an inference task:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>)<!-- HTML_TAG_END --></pre></div> <ol start="2"><li>Pass your input text to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the 
mountain&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <p>If you have more than one input, pass your input as a list:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne&quot;</span>, <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Any additional parameters for your task can also be included in the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>. The <code>text-generation</code> task has a <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> method with several parameters for controlling the output. 
<h3 id="choose-a-model-and-tokenizer">Choose a model and tokenizer</h3> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> accepts any model from the <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>. There are tags on the Model Hub that allow you to filter for a model you’d like to use for your task. Once you’ve picked an appropriate model, load it with the corresponding <code>AutoModelFor</code> and <code>AutoTokenizer</code> classes.
For example, load the <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCausalLM">AutoModelForCausalLM</a> class for a causal language modeling task:</p>

<pre>
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
</pre>

<p>Create a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for your task, and specify the model and tokenizer you’ve loaded:</p>

<pre>
>>> from transformers import pipeline

>>> generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
</pre>
<p>Pass your input text to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> to generate some text:</p>

<pre>
>>> generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone")
[{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm'}]
</pre>

<h2 id="audio-pipeline">Audio pipeline</h2> <p>The flexibility of the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> means it can also be extended to audio tasks.</p> <p>For example, let’s classify the emotion from a short clip of John F. Kennedy’s famous <a href="https://en.wikipedia.org/wiki/We_choose_to_go_to_the_Moon" rel="nofollow">“We choose to go to the Moon”</a> speech.
Find an <a href="https://huggingface.co/models?pipeline_tag=audio-classification" rel="nofollow">audio classification</a> model on the Model Hub for emotion recognition and load it in the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>:</p>

<pre>
>>> from transformers import pipeline

>>> audio_classifier = pipeline(
...     task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )
</pre>
<p>Pass the audio file to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>:</p>

<pre>
>>> audio_classifier("jfk_moon_speech.wav")
[{'label': 'calm', 'score': 0.13856211304664612},
 {'label': 'disgust', 'score': 0.13148026168346405},
 {'label': 'happy', 'score': 0.12635163962841034},
 {'label': 'angry', 'score': 0.12439591437578201},
 {'label': 'fearful', 'score': 0.12404385954141617}]
</pre>
0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Vision pipeline </span></h2> <p>Finally, using a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for vision tasks is practically identical.</p> <p>Specify your vision task and pass your image to the classifier. The imaage can be a link or a local path to the image. For example, what species of cat is shown below?</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" alt="pipeline-cat-chonk"></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier = pipeline(task=<span class="hljs-string">&quot;image-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier( <span class="hljs-meta">... </span> images=<span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">... 
[{'label': 'lynx, catamount', 'score': 0.4403027892112732},
 {'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', 'score': 0.03433405980467796},
 {'label': 'snow leopard, ounce, Panthera uncia', 'score': 0.032148055732250214},
 {'label': 'Egyptian cat', 'score': 0.02353910356760025},
 {'label': 'tiger cat', 'score': 0.023034192621707916}]
</pre>
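<p>As an illustrative sketch (not part of the original example), the image classification pipeline also accepts a <code>top_k</code> parameter if you only want the highest-scoring labels:</p>

<pre>
>>> # Continuing with the vision_classifier created above
>>> vision_classifier(
...     images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
...     top_k=3,
... )
</pre>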
85
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/run_scripts.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;train-with-a-script&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;setup&quot;,&quot;title&quot;:&quot;Setup&quot;},{&quot;local&quot;:&quot;run-a-script&quot;,&quot;title&quot;:&quot;Run a script&quot;},{&quot;local&quot;:&quot;distributed-training-and-mixed-precision&quot;,&quot;title&quot;:&quot;Distributed training and mixed precision&quot;},{&quot;local&quot;:&quot;run-a-script-on-a-tpu&quot;,&quot;title&quot;:&quot;Run a script on a TPU&quot;},{&quot;local&quot;:&quot;run-a-script-with-accelerate&quot;,&quot;title&quot;:&quot;Run a script with 🤗 Accelerate&quot;},{&quot;local&quot;:&quot;use-a-custom-dataset&quot;,&quot;title&quot;:&quot;Use a custom dataset&quot;},{&quot;local&quot;:&quot;test-a-script&quot;,&quot;title&quot;:&quot;Test a script&quot;},{&quot;local&quot;:&quot;resume-training-from-checkpoint&quot;,&quot;title&quot;:&quot;Resume training from checkpoint&quot;},{&quot;local&quot;:&quot;share-your-model&quot;,&quot;title&quot;:&quot;Share your model&quot;}],&quot;title&quot;:&quot;Train with a script&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/run_scripts.mdx-dea66c26.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="train-with-a-script" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train-with-a-script"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train with a script </span></h1> <p>Along with the 🤗 Transformers <a href="./noteboks/README">notebooks</a>, there are also example scripts demonstrating how to train a model for a task with <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch" rel="nofollow">PyTorch</a>, <a 
href="https://github.com/huggingface/transformers/tree/master/examples/tensorflow" rel="nofollow">TensorFlow</a>, or <a href="https://github.com/huggingface/transformers/tree/master/examples/flax" rel="nofollow">JAX/Flax</a>.</p> <p>You will also find scripts we’ve used in our <a href="https://github.com/huggingface/transformers/tree/master/examples/research_projects" rel="nofollow">research projects</a> and <a href="https://github.com/huggingface/transformers/tree/master/examples/legacy" rel="nofollow">legacy examples</a> which are mostly community contributed. These scripts are not actively maintained and require a specific version of 🤗 Transformers that will most likely be incompatible with the latest version of the library.</p> <p>The example scripts are not expected to work out-of-the-box on every problem, and you may need to adapt the script to the problem you’re trying to solve. To help you with this, most of the scripts fully expose how data is preprocessed, allowing you to edit it as necessary for your use case.</p> <p>For any feature you’d like to implement in an example script, please discuss it on the <a href="https://discuss.huggingface.co/" rel="nofollow">forum</a> or in an <a href="https://github.com/huggingface/transformers/issues" rel="nofollow">issue</a> before submitting a Pull Request. While we welcome bug fixes, it is unlikely we will merge a Pull Request that adds more functionality at the cost of readability.</p> <p>This guide will show you how to run an example summarization training script in <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization" rel="nofollow">PyTorch</a> and <a href="https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization" rel="nofollow">TensorFlow</a>. 
<p>For any feature you’d like to implement in an example script, please discuss it on the <a href="https://discuss.huggingface.co/" rel="nofollow">forum</a> or in an <a href="https://github.com/huggingface/transformers/issues" rel="nofollow">issue</a> before submitting a Pull Request. While we welcome bug fixes, it is unlikely we will merge a Pull Request that adds more functionality at the cost of readability.</p> <p>This guide will show you how to run an example summarization training script in <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization" rel="nofollow">PyTorch</a> and <a href="https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization" rel="nofollow">TensorFlow</a>. All examples are expected to work with both frameworks unless otherwise specified.</p>

<h2 id="setup">Setup</h2> <p>To successfully run the latest version of the example scripts, you have to <strong>install 🤗 Transformers from source</strong> in a new virtual environment:</p>

<pre>
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
</pre>
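<p>The commands above assume you are working inside a fresh virtual environment; if you still need to create and activate one first, a minimal sketch (any environment tool works):</p>

<pre>
python -m venv .env
source .env/bin/activate
</pre>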
href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li></ul></details> <p>Then switch your current clone of 🤗 Transformers to a specific version, like v3.5.1 for example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git checkout tags/v3.5.1<!-- HTML_TAG_END --></pre></div> <p>After you’ve setup the correct library version, navigate to the example folder of your choice and install the example specific requirements:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
<pre>
pip install -r requirements.txt
</pre>

<h2 id="run-a-script">Run a script</h2> <p>The example script downloads and preprocesses a dataset from the 🤗 <a href="https://huggingface.co/docs/datasets/" rel="nofollow">Datasets</a> library. Then the script fine-tunes a model on the dataset with the <a href="https://huggingface.co/docs/transformers/main_classes/trainer" rel="nofollow">Trainer</a> on an architecture that supports summarization. The following example shows how to fine-tune <a href="https://huggingface.co/t5-small" rel="nofollow">T5-small</a> on the <a href="https://huggingface.co/datasets/cnn_dailymail" rel="nofollow">CNN/DailyMail</a> dataset. The T5 model requires an additional <code>source_prefix</code> argument due to how it was trained.
This prompt lets T5 know this is a summarization task.</p>

<pre>
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
</pre>
<h2 id="distributed-training-and-mixed-precision">Distributed training and mixed precision</h2> <p>The <a href="https://huggingface.co/docs/transformers/main_classes/trainer" rel="nofollow">Trainer</a> supports distributed training and mixed precision, which means you can also use it in a script. To enable both of these features:</p> <ul><li>Add the <code>fp16</code> argument to enable mixed precision.</li> <li>Set the number of GPUs to use with the <code>nproc_per_node</code> argument.</li></ul>

<pre>
python -m torch.distributed.launch \
    --nproc_per_node 8 pytorch/summarization/run_summarization.py \
    --fp16 \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
</pre>

<p>TensorFlow scripts utilize a <a href="https://www.tensorflow.org/guide/distributed_training#mirroredstrategy" rel="nofollow"><code>MirroredStrategy</code></a> for distributed training, and you don’t need to add any additional arguments to the training script. The TensorFlow script will use multiple GPUs by default if they are available.</p>
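<p>As an illustrative sketch (the argument names are assumed to mirror the PyTorch script and are not taken from this guide), running the TensorFlow summarization example on a multi-GPU machine is therefore just a plain single-process launch:</p>

<pre>
python examples/tensorflow/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization
</pre>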
rel="nofollow"><code>MirroredStrategy</code></a> for distributed training, and you don’t need to add any additional arguments to the training script. The TensorFlow script will use multiple GPUs by default if they are available.</p> <h2 class="relative group"><a id="run-a-script-on-a-tpu" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-a-script-on-a-tpu"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Run a script on a TPU </span></h2> <p>Tensor Processing Units (TPUs) are specifically designed to accelerate performance. PyTorch supports TPUs with the <a href="https://www.tensorflow.org/xla" rel="nofollow">XLA</a> deep learning compiler (see <a href="https://github.com/pytorch/xla/blob/master/README.md" rel="nofollow">here</a> for more details). To use a TPU, launch the <code>xla_spawn.py</code> script and use the <code>num_cores</code> argument to set the number of TPU cores you want to use.</p> <p>TensorFlow scripts utilize a <a href="https://www.tensorflow.org/guide/distributed_training#tpustrategy" rel="nofollow"><code>TPUStrategy</code></a> for training on TPUs. 
To use a TPU, pass the name of the TPU resource to the <code>tpu</code> argument.</p>

<pre>
python xla_spawn.py --num_cores 8 \
    summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
</pre>
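<p>For the TensorFlow script, the equivalent is a sketch along these lines, where <code>your_tpu_name</code> is a placeholder for your TPU resource and the remaining arguments are assumed to match the examples above:</p>

<pre>
python run_summarization.py \
    --tpu your_tpu_name \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization
</pre>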
<h2 id="run-a-script-with-accelerate">Run a script with 🤗 Accelerate</h2> <p>🤗 <a href="https://huggingface.co/docs/accelerate/index.html" rel="nofollow">Accelerate</a> is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have 🤗 Accelerate installed if you don’t already have it:</p>

<pre>
pip install accelerate
</pre>

<p>Instead of the <code>run_summarization.py</code> script, you need to use the <code>run_summarization_no_trainer.py</code> script. 🤗 Accelerate-supported scripts will have a <code>task_no_trainer.py</code> file in the folder.
Begin by running the following command to create and save a configuration file:</p>

<pre>
accelerate config
</pre>

<p>Test your setup to make sure it is configured correctly:</p>

<pre>
accelerate test
</pre>

<p>Now you are ready to launch the training:</p>
<pre>
accelerate launch run_summarization_no_trainer.py \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
</pre>

<h2 id="use-a-custom-dataset">Use a custom dataset</h2> <p>The summarization script supports custom datasets as long as they are a CSV or JSON Lines file. When you use your own dataset, you need to specify several additional arguments, as illustrated below:</p> <ul><li><code>train_file</code> and <code>validation_file</code> specify the path to your training and validation files.</li> <li><code>text_column</code> is the input text to summarize.</li> <li><code>summary_column</code> is the target text to output.</li></ul>
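<p>For illustration (the records and column names here are hypothetical), a JSON Lines training file for this script could contain one JSON object per line:</p>

<pre>
{"text": "The full article to summarize goes here ...", "summary": "A short reference summary."}
{"text": "Another training article ...", "summary": "Its reference summary."}
</pre>

<p>With a file like this you would pass <code>--text_column text</code> and <code>--summary_column summary</code> so the script knows which fields to read.</p>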
<p>A summarization script using a custom dataset would look like this:</p>

<pre>
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --train_file path_to_csv_or_jsonlines_file \
    --validation_file path_to_csv_or_jsonlines_file \
    --text_column text_column_name \
    --summary_column summary_column_name \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate
</pre>

<h2 id="test-a-script">Test a script</h2> <p>It is often a good idea to run your script on a smaller number of dataset examples to ensure everything works as expected before committing to an entire dataset which may take hours to complete.
Use the following arguments to truncate the dataset to a maximum number of samples:</p> <ul><li><code>max_train_samples</code></li> <li><code>max_eval_samples</code></li> <li><code>max_predict_samples</code></li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path t5-small \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate<!-- HTML_TAG_END --></pre></div> <p>Not all example scripts support the <code>max_predict_samples</code> argument. 
If you aren’t sure whether your script supports this argument, add the <code>-h</code> argument to check:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->examples/pytorch/summarization/run_summarization.py -h<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="resume-training-from-checkpoint" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#resume-training-from-checkpoint"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Resume training from checkpoint </span></h2> <p>Another helpful option to enable is resuming training from a previous checkpoint. This will ensure you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint.</p> <p>The first method uses the <code>output_dir previous_output_dir</code> argument to resume training from the latest checkpoint stored in <code>output_dir</code>. 
In this case, you should remove <code>overwrite_output_dir</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --output_dir previous_output_dir \ --predict_with_generate<!-- HTML_TAG_END --></pre></div> <p>The second method uses the <code>resume_from_checkpoint path_to_specific_checkpoint</code> argument to resume training from a specific checkpoint folder.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ 
--resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="share-your-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#share-your-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Share your model </span></h2> <p>All scripts can upload your final model to the <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>. Make sure you are logged into Hugging Face before you begin:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->huggingface-cli login<!-- HTML_TAG_END --></pre></div> <p>Then add the <code>push_to_hub</code> argument to the script. This argument will create a repository with your Hugging Face username and the folder name specified in <code>output_dir</code>.</p> <p>To give your repository a specific name, use the <code>push_to_hub_model_id</code> argument to add it. 
The repository will be automatically listed under your namespace.</p> <p>The following example shows how to upload a model with a specific repository name:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/summarization/run_summarization.py --model_name_or_path t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config <span class="hljs-string">&quot;3.0.0&quot;</span> \ --source_prefix <span class="hljs-string">&quot;summarize: &quot;</span> \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="1e5j8u2"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1e5j8u2"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/run_scripts.mdx-dea66c26.js") ], params: {} } }); </script>
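<p>Once the upload finishes, the checkpoint can be loaded back like any other model on the Hub. The snippet below is a minimal sketch rather than part of the script itself; the repository name is hypothetical and assumes the username and <code>push_to_hub_model_id</code> used above:</p> <pre><code># hypothetical repository id: &lt;your-username&gt;/&lt;push_to_hub_model_id&gt;
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo_id = "your-username/finetuned-t5-cnn_dailymail"

# download the fine-tuned tokenizer and model from the Hub
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)
</code></pre>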
86
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/fast_tokenizers.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;using-tokenizers-from-tokenizers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;loading-directly-from-the-tokenizer-object&quot;,&quot;title&quot;:&quot;Loading directly from the tokenizer object&quot;},{&quot;local&quot;:&quot;loading-from-a-json-file&quot;,&quot;title&quot;:&quot;Loading from a JSON file&quot;}],&quot;title&quot;:&quot;Using tokenizers from 🤗 Tokenizers&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/fast_tokenizers.mdx-1a58673f.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="using-tokenizers-from-tokenizers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-tokenizers-from-tokenizers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using tokenizers from 🤗 Tokenizers </span></h1> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> depends on the <a href="https://huggingface.co/docs/tokenizers" rel="nofollow">🤗 Tokenizers</a> library. 
The tokenizers obtained from the 🤗 Tokenizers library can be loaded very simply into 🤗 Transformers.</p> <p>Before getting in the specifics, let’s first start by creating a dummy tokenizer in a few lines:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers <span class="hljs-keyword">import</span> Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.models <span class="hljs-keyword">import</span> BPE <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.trainers <span class="hljs-keyword">import</span> BpeTrainer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.pre_tokenizers <span class="hljs-keyword">import</span> Whitespace <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Tokenizer(BPE(unk_token=<span class="hljs-string">&quot;[UNK]&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = BpeTrainer(special_tokens=[<span class="hljs-string">&quot;[UNK]&quot;</span>, <span class="hljs-string">&quot;[CLS]&quot;</span>, <span class="hljs-string">&quot;[SEP]&quot;</span>, <span class="hljs-string">&quot;[PAD]&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pre_tokenizer = Whitespace() <span class="hljs-meta">&gt;&gt;&gt; </span>files = [...] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.train(files, trainer)<!-- HTML_TAG_END --></pre></div> <p>We now have a tokenizer trained on the files we defined. 
We can either continue using it in that runtime, or save it to a JSON file for future re-use.</p> <h2 class="relative group"><a id="loading-directly-from-the-tokenizer-object" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#loading-directly-from-the-tokenizer-object"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Loading directly from the tokenizer object </span></h2> <p>Let’s see how to leverage this tokenizer object in the 🤗 Transformers library. The <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> class allows for easy instantiation, by accepting the instantiated <em>tokenizer</em> object as an argument:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)<!-- HTML_TAG_END --></pre></div> <p>This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! 
Head to <a href="main_classes/tokenizer">the tokenizer page</a> for more information.</p> <h2 class="relative group"><a id="loading-from-a-json-file" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#loading-from-a-json-file"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Loading from a JSON file </span></h2> <p>In order to load a tokenizer from a JSON file, let’s first start by saving our tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save(<span class="hljs-string">&quot;tokenizer.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The path to which we saved this file can be passed to the <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> initialization method using the <code>tokenizer_file</code> parameter:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file=<span class="hljs-string">&quot;tokenizer.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to <a href="main_classes/tokenizer">the tokenizer page</a> for more information.</p> <script type="module" data-hydrate="fcofsk"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="fcofsk"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/fast_tokenizers.mdx-1a58673f.js") ], params: {} } }); </script>
87
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/benchmarks.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;benchmarks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;how-to-benchmark-transformers-models&quot;,&quot;title&quot;:&quot;How to benchmark 🤗 Transformers models&quot;},{&quot;local&quot;:&quot;benchmark-best-practices&quot;,&quot;title&quot;:&quot;Benchmark best practices&quot;},{&quot;local&quot;:&quot;sharing-your-benchmark&quot;,&quot;title&quot;:&quot;Sharing your benchmark&quot;}],&quot;title&quot;:&quot;Benchmarks&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/benchmarks.mdx-680f04c0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/DocNotebookDropdown-ecff2a90.js"> <h1 class="relative group"><a id="benchmarks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#benchmarks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Benchmarks </span></h1> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Hugging Face’s Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed and memory complexity of Transformer models.</p></div> <div class="flex space-x-1 absolute z-10 right-0 top-0"><div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" 
src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>Let’s take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks.</p> <p>A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found <a href="https://github.com/huggingface/notebooks/tree/master/examples/benchmark.ipynb" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="how-to-benchmark-transformers-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-to-benchmark-transformers-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How to benchmark 🤗 Transformers models </span></h2> <p>The classes <code>PyTorchBenchmark</code>and <code>TensorFlowBenchmark</code>allow to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the <em>peak memory usage</em> and <em>required time</em> for both <em>inference</em> and <em>training</em>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Hereby, <em>inference</em> is defined by a single forward pass, and <em>training</em> is defined by a single forward pass and backward pass.</p></div> <p>The benchmark classes <code>PyTorchBenchmark</code>and <code>TensorFlowBenchmark</code>expect an object of type <code>PyTorchBenchmarkArguments</code>and <code>TensorFlowBenchmarkArguments</code> respectively, for instantiation. <code>PyTorchBenchmarkArguments</code>and <code>TensorFlowBenchmarkArguments</code>are data classes and contain all relevant configurations for their corresponding benchmark class. 
The following example shows how a BERT model of type <em>bert-base-uncased</em> can be benchmarked.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PyTorchBenchmark, PyTorchBenchmarkArguments <span class="hljs-meta">&gt;&gt;&gt; </span>args = PyTorchBenchmarkArguments(models=[<span 
class="hljs-string">&quot;bert-base-uncased&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = PyTorchBenchmark(args)<!-- HTML_TAG_END --></pre></div> <p>Here, three arguments are given to the benchmark argument data classes, namely <code>models</code>, <code>batch_sizes</code>, and <code>sequence_lengths</code>. The argument <code>models</code> is required and expects a <code>list</code> of model identifiers from the <a href="https://huggingface.co/models" rel="nofollow">model hub</a> The <code>list</code> arguments <code>batch_sizes</code> and <code>sequence_lengths</code> define the size of the <code>input_ids</code> on which the model is benchmarked. There are many more parameters that can be configured via the benchmark argument data classes. For more detail on these one can either directly consult the files <code>src/transformers/benchmark/benchmark_args_utils.py</code>, <code>src/transformers/benchmark/benchmark_args.py</code> (for PyTorch) and <code>src/transformers/benchmark/benchmark_args_tf.py</code> (for Tensorflow). Alternatively, running the following shell commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow respectively.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none 
rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START -->python examples/pytorch/benchmarking/run_benchmark.py --<span class="hljs-built_in">help</span><!-- HTML_TAG_END --></pre></div> <p>An instantiated benchmark object can then simply be run by calling <code>benchmark.run()</code>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" 
role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>results = benchmark.run() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(results) ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.006</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.006</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.018</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.088</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1227</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1281</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1307</span> bert-base-uncased <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1539</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: PyTorch - use_torchscript: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">1.4</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 08:<span class="hljs-number">58</span>:<span class="hljs-number">43.371351</span> - fp16: <span class="hljs-literal">False</span> - 
use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span><!-- HTML_TAG_END --></pre></div> <p>By default, the <em>time</em> and the <em>required memory</em> for <em>inference</em> are benchmarked. In the example output above the first two sections show the result corresponding to <em>inference time</em> and <em>inference memory</em>. In addition, all relevant information about the computing environment, <em>e.g.</em> the GPU type, the system, the library versions, etc… are printed out in the third section under <em>ENVIRONMENT INFORMATION</em>. This information can optionally be saved in a <em>.csv</em> file when adding the argument <code>save_to_csv=True</code> to <code>PyTorchBenchmarkArguments</code>and <code>TensorFlowBenchmarkArguments</code>respectively. In this case, every section is saved in a separate <em>.csv</em> file. The path to each <em>.csv</em> file can optionally be defined via the argument data classes.</p> <p>Instead of benchmarking pre-trained models via their model identifier, <em>e.g.</em> <code>bert-base-uncased</code>, the user can alternatively benchmark an arbitrary configuration of any available model class. In this case, a <code>list</code> of configurations must be inserted with the benchmark args as follows.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>args = PyTorchBenchmarkArguments( <span class="hljs-meta">... </span> models=[<span class="hljs-string">&quot;bert-base&quot;</span>, <span class="hljs-string">&quot;bert-384-hid&quot;</span>, <span class="hljs-string">&quot;bert-6-lay&quot;</span>], batch_sizes=[<span class="hljs-number">8</span>], sequence_lengths=[<span class="hljs-number">8</span>, <span class="hljs-number">32</span>, <span class="hljs-number">128</span>, <span class="hljs-number">512</span>] <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_base = BertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>config_384_hid = BertConfig(hidden_size=<span class="hljs-number">384</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>config_6_lay = BertConfig(num_hidden_layers=<span class="hljs-number">6</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) <span class="hljs-meta">&gt;&gt;&gt; </span>benchmark.run() ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time <span class="hljs-keyword">in</span> s -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.006</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.006</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.018</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.088</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.006</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.006</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.011</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.054</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">0.003</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">0.004</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">0.009</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">0.044</span> -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory <span class="hljs-keyword">in</span> MB -------------------------------------------------------------------------------- bert-base <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1277</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1281</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1307</span> bert-base <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1539</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span 
class="hljs-number">1005</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1027</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1035</span> bert-<span class="hljs-number">384</span>-hid <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1255</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">8</span> <span class="hljs-number">1097</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">32</span> <span class="hljs-number">1101</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">128</span> <span class="hljs-number">1127</span> bert-<span class="hljs-number">6</span>-lay <span class="hljs-number">8</span> <span class="hljs-number">512</span> <span class="hljs-number">1359</span> -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: <span class="hljs-number">2.11</span><span class="hljs-number">.0</span> - framework: PyTorch - use_torchscript: <span class="hljs-literal">False</span> - framework_version: <span class="hljs-number">1.4</span><span class="hljs-number">.0</span> - python_version: <span class="hljs-number">3.6</span><span class="hljs-number">.10</span> - system: Linux - cpu: x86_64 - architecture: 64bit - date: <span class="hljs-number">2020</span>-06-<span class="hljs-number">29</span> - time: 09:<span class="hljs-number">35</span>:<span class="hljs-number">25.143267</span> - fp16: <span class="hljs-literal">False</span> - use_multiprocessing: <span class="hljs-literal">True</span> - only_pretrain_model: <span class="hljs-literal">False</span> - cpu_ram_mb: <span class="hljs-number">32088</span> - use_gpu: <span class="hljs-literal">True</span> - num_gpus: <span class="hljs-number">1</span> - gpu: TITAN RTX - gpu_ram_mb: <span class="hljs-number">24217</span> - gpu_power_watts: <span class="hljs-number">280.0</span> - gpu_performance_state: <span class="hljs-number">2</span> - use_tpu: <span class="hljs-literal">False</span><!-- HTML_TAG_END --></pre></div> <p>Again, <em>inference time</em> and <em>required memory</em> for <em>inference</em> are measured, but this time for customized configurations of the <code>BertModel</code> class. 
This feature can be especially helpful when deciding which configuration the model should be trained with.</p>

<h2 id="benchmark-best-practices">Benchmark best practices</h2>

<p>This section lists a couple of best practices one should be aware of when benchmarking a model.</p>

<ul><li>Currently, only single-device benchmarking is supported. When benchmarking on GPU, it is recommended that the user specifies on which device the code should be run by setting the <code>CUDA_VISIBLE_DEVICES</code> environment variable in the shell, <em>e.g.</em> <code>export CUDA_VISIBLE_DEVICES=0</code> before running the code.</li>
<li>The option <code>no_multi_processing</code> should only be set to <code>True</code> for testing and debugging. To ensure accurate memory measurement, each memory benchmark should run in its own process, which is the behaviour you get when <code>no_multi_processing</code> is left disabled.</li>
<li>One should always state the environment information when sharing the results of a model benchmark. Results can vary heavily between different GPU devices, library versions, etc., so benchmark results on their own are not very useful for the community.</li></ul>
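<p>As a rough illustration of these practices, the sketch below pins the benchmark to a single GPU and leaves multiprocessing enabled. The <code>no_multi_processing</code> flag name is taken from the prose above, so treat it as an assumption and check it against your installed version of 🤗 Transformers.</p>

<pre>import os

# Pin the benchmark to one GPU before anything initializes CUDA (first practice above).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],   # any checkpoint name works here; this one is just an example
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    no_multi_processing=False,      # assumed flag name (see above): keep each memory run in its own process
)

benchmark = PyTorchBenchmark(args)
results = benchmark.run()

# Third practice above: always share the printed environment information together with the numbers.
print(results)</pre>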
<h2 id="sharing-your-benchmark">Sharing your benchmark</h2>

<p>Previously all available core models (10 at the time) had been benchmarked for <em>inference time</em>, across many different settings: using PyTorch, with and without TorchScript, and using TensorFlow, with and without XLA. All of those tests were done across CPUs (except for TensorFlow XLA) and GPUs.</p>

<p>The approach is detailed in the <a href="https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2" rel="nofollow">following blogpost</a> and the results are available <a href="https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing" rel="nofollow">here</a>.</p>

<p>With the new <em>benchmark</em> tools, it is easier than ever to share your benchmark results with the community:</p>

<ul><li><a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/benchmarking/README.md" rel="nofollow">PyTorch Benchmarking Results</a>.</li>
<li><a href="https://github.com/huggingface/transformers/tree/master/examples/tensorflow/benchmarking/README.md" rel="nofollow">TensorFlow Benchmarking Results</a>.</li></ul>
88
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/quicktour.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;quick-tour&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pipeline&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pipeline-usage&quot;,&quot;title&quot;:&quot;Pipeline usage&quot;},{&quot;local&quot;:&quot;use-another-model-and-tokenizer-in-the-pipeline&quot;,&quot;title&quot;:&quot;Use another model and tokenizer in the pipeline&quot;}],&quot;title&quot;:&quot;Pipeline&quot;},{&quot;local&quot;:&quot;autoclass&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;autotokenizer&quot;,&quot;title&quot;:&quot;AutoTokenizer&quot;},{&quot;local&quot;:&quot;automodel&quot;,&quot;title&quot;:&quot;AutoModel&quot;},{&quot;local&quot;:&quot;save-a-model&quot;,&quot;title&quot;:&quot;Save a model&quot;}],&quot;title&quot;:&quot;AutoClass&quot;}],&quot;title&quot;:&quot;Quick tour&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/quicktour.mdx-8a4295b9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/DocNotebookDropdown-ecff2a90.js"> <h1 class="relative group"><a id="quick-tour" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#quick-tour"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Quick tour </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"><div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div 
class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>Get up and running with 🤗 Transformers! Start using the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for rapid inference, and quickly load a pretrained model and tokenizer with an <a href="./model_doc/auto">AutoClass</a> to solve your text, vision or audio task.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If not, the code is expected to work for both backends without any change.</p></div> <h2 class="relative group"><a id="pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline </span></h2> <p><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> is the easiest way to use a pretrained model for a given task.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/tiZFewofSLM" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> supports many common tasks out-of-the-box:</p> <p><strong>Text</strong>:</p> <ul><li>Sentiment analysis: classify the polarity of a given text.</li> <li>Text generation (in English): generate text from a given input.</li> <li>Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.).</li> <li>Question answering: extract the answer from the context, given some context and a question.</li> <li>Fill-mask: fill in the blank given a text with masked words.</li> <li>Summarization: generate a summary of a long sequence of text or document.</li> <li>Translation: translate text into another language.</li> <li>Feature extraction: create a tensor representation of the text.</li></ul> <p><strong>Image</strong>:</p> <ul><li>Image classification: classify an image.</li> <li>Image segmentation: classify every pixel in an image.</li> <li>Object detection: detect objects within an image.</li></ul> <p><strong>Audio</strong>:</p> <ul><li>Audio classification: assign a label to a given segment of 
audio.</li> <li>Automatic speech recognition (ASR): transcribe audio data into text.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For more details about the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> and associated tasks, refer to the documentation <a href="./main_classes/pipelines">here</a>.</p></div> <h3 class="relative group"><a id="pipeline-usage" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-usage"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline usage </span></h3> <p>In the following example, you will use the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for sentiment analysis.</p> <p>Install the following dependencies if you haven’t already:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" 
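<p>Each of these tasks has a short identifier that you hand to <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>. A quick, illustrative sketch for one of the text tasks above (the default model is downloaded on first use, and the generated text will vary from run to run):</p>

<pre>from transformers import pipeline

# "text-generation" is the identifier for the text generation bullet above.
generator = pipeline("text-generation")
print(generator("In this quick tour, we will", max_length=30))</pre>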
fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START -->pip install torch<!-- HTML_TAG_END --></pre></div> <p>Import <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> and specify the task you want to complete:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The pipeline downloads and caches a default <a href="https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english" rel="nofollow">pretrained model</a> and tokenizer for sentiment analysis. 
Now you can use the <code>classifier</code> on your target text:</p>

<pre>>>> classifier("We are very happy to show you the 🤗 Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]</pre>

<p>For more than one sentence, pass a list of sentences to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> which returns a list of dictionaries:</p>

<pre>>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
...     print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309</pre>

<p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> can also iterate over an entire dataset. Start by installing the <a href="https://huggingface.co/docs/datasets/" rel="nofollow">🤗 Datasets</a> library:</p>

<pre>pip install datasets</pre>

<p>Create a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> with the task you want to solve for and the model you want to use.</p>
</span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Next, load a dataset (see the 🤗 Datasets <a href="https://huggingface.co/docs/datasets/quickstart.html" rel="nofollow">Quick Start</a> for more details) you’d like to iterate over. For example, let’s load the <a href="https://huggingface.co/datasets/superb" rel="nofollow">SUPERB</a> dataset:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> datasets <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>You can pass a whole dataset pipeline:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
<pre>>>> files = dataset["file"]
>>> speech_recognizer(files[:4])
[{'text': 'HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED FLOWER FAT AND SAUCE'},
 {'text': 'STUFFERED INTO YOU HIS BELLY COUNSELLED HIM'},
 {'text': 'AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE BROTHELS'},
 {'text': 'HO BERTIE ANY GOOD IN YOUR MIND'}]</pre>

<p>For a larger dataset where the inputs are big (like in speech or vision), you will want to pass a generator instead of a list, so that the inputs are not all loaded into memory at once; a minimal sketch follows. See the <a href="./main_classes/pipelines">pipeline documentation</a> for more information.</p>
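<p>A small sketch of that pattern, reusing the <code>speech_recognizer</code> and <code>dataset</code> objects from above (the generator and loop are illustrative, not part of the original example):</p>

<pre>def audio_files():
    # Yield one file path at a time instead of materializing the whole list in memory.
    for path in dataset["file"]:
        yield path

# The pipeline consumes the generator lazily and yields one prediction per input.
for prediction in speech_recognizer(audio_files()):
    print(prediction["text"])</pre>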
<h3 id="use-another-model-and-tokenizer-in-the-pipeline">Use another model and tokenizer in the pipeline</h3>

<p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> can accommodate any model from the <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>, making it easy to adapt the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> for other use-cases. For example, if you’d like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual <a href="https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment" rel="nofollow">BERT model</a> fine-tuned for sentiment analysis. Great, let’s use this model!</p>

<pre>>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"</pre>

<p>Use <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a> and <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a> to load the pretrained model and its associated tokenizer (more on an <code>AutoClass</code> below):</p>

<pre>>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)</pre>

<p>Then you can specify the model and tokenizer in the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>, and apply the <code>classifier</code> on your target text:</p>
<pre>>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
[{'label': '5 stars', 'score': 0.7273}]</pre>

<p>If you can’t find a model for your use-case, you will need to fine-tune a pretrained model on your data. Take a look at our <a href="./training">fine-tuning tutorial</a> to learn how. Finally, after you’ve fine-tuned your pretrained model, please consider sharing it (see tutorial <a href="./model_sharing">here</a>) with the community on the Model Hub to democratize NLP for everyone! 🤗</p>

<h2 id="autoclass">AutoClass</h2>

<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/AhChOFRegn4" title="YouTube video player" allowfullscreen></iframe>

<p>Under the hood, the <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a> and <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a> classes work together to power the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>. An <a href="./model_doc/auto">AutoClass</a> is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate <code>AutoClass</code> for your task and its associated tokenizer with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>.</p>

<p>Let’s return to our example and see how you can use the <code>AutoClass</code> to replicate the results of the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>.</p>

<h3 id="autotokenizer">AutoTokenizer</h3>

<p>A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer will split the text into words called <em>tokens</em>. There are multiple rules that govern the tokenization process, including how to split a word and at what level (learn more about tokenization <a href="./tokenizer_summary">here</a>). The most important thing to remember is that you need to instantiate the tokenizer with the same model name to ensure you’re using the same tokenization rules the model was pretrained with.</p>

<p>Load a tokenizer with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>:</p>
<pre>>>> from transformers import AutoTokenizer

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)</pre>

<p>Next, the tokenizer converts the tokens into numbers in order to construct a tensor as input to the model. The mapping between tokens and numbers is known as the model’s <em>vocabulary</em>.</p>

<p>Pass your text to the tokenizer:</p>

<pre>>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}</pre>
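<p>To see how those ids relate back to tokens in the vocabulary, you can map them with the tokenizer (an illustrative check, not part of the original quick tour; output omitted):</p>

<pre># Round trip: ids back to (sub)word tokens, and ids back to a readable string.
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
print(tokenizer.decode(encoding["input_ids"]))</pre>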
<p>The tokenizer will return a dictionary containing:</p>

<ul><li><a href="./glossary#input-ids">input_ids</a>: numerical representations of your tokens.</li>
<li><a href="./glossary#attention-mask">attention_mask</a>: indicates which tokens should be attended to.</li></ul>

<p>Just like the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a>, the tokenizer will accept a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length:</p>
fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Read the <a href="./preprocessing">preprocessing</a> tutorial for more details about tokenization.</p> <h3 class="relative group"><a id="automodel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automodel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoModel </span></h3> <p>🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel">AutoModel</a> like you would load an <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. The only difference is selecting the correct <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel">AutoModel</a> for the task. Since you are doing text - or sequence - classification, load <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a>. 
<h3 class="relative group"><a id="automodel" class="header-link" href="#automodel"></a> <span>AutoModel </span></h3> <p>🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel">AutoModel</a> like you would load an <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoTokenizer">AutoTokenizer</a>. The only difference is selecting the correct <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel">AutoModel</a> for the task. Since you are doing text - or sequence - classification, load <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a>. The TensorFlow equivalent is simply <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification">TFAutoModelForSequenceClassification</a>:</p> <pre><code>&gt;&gt;&gt; from transformers import AutoModelForSequenceClassification

&gt;&gt;&gt; model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
&gt;&gt;&gt; pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)</code></pre>
= <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the <a href="./task_summary">task summary</a> for which <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModel">AutoModel</a> class to use for which task.</p></div> <p>Now you can pass your preprocessed batch of inputs directly to the model. If you are using a PyTorch model, unpack the dictionary by adding <code>**</code>. For TensorFlow models, pass the dictionary keys directly to the tensors:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 
<p>The model outputs the final activations in the <code>logits</code> attribute. Apply the softmax function to the <code>logits</code> to retrieve the probabilities:</p> <pre><code>&gt;&gt;&gt; from torch import nn

&gt;&gt;&gt; pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
&gt;&gt;&gt; print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
        [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=&lt;SoftmaxBackward0&gt;)</code></pre>
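<p>If you want a class name rather than a probability distribution, you can take the argmax over the last dimension and look it up in the model configuration. This is a small sketch assuming the <code>pt_predictions</code> and <code>pt_model</code> from above; the label names come from the checkpoint and may differ for other models:</p> <pre><code>&gt;&gt;&gt; predicted_ids = pt_predictions.argmax(dim=-1)
&gt;&gt;&gt; # id2label maps class indices to the human-readable labels stored in the checkpoint config
&gt;&gt;&gt; print([pt_model.config.id2label[int(i)] for i in predicted_ids])</code></pre>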
<div class="course-tip"><p>All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors <em>before</em> the final activation function (like softmax) because the final activation function is often fused with the loss.</p></div> <p>Models are a standard <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow"><code>torch.nn.Module</code></a> or a <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow"><code>tf.keras.Model</code></a> so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the <code>fit</code> method from <a href="https://keras.io/" rel="nofollow">Keras</a>. Refer to the <a href="./training">training tutorial</a> for more details.</p>
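<p>To see what such a "usual training loop" looks like for this model, here is a minimal sketch of a single optimization step; it reuses the <code>pt_model</code> and <code>pt_batch</code> from above, and the labels are made up purely for illustration:</p> <pre><code>&gt;&gt;&gt; import torch
&gt;&gt;&gt; from torch.optim import AdamW

&gt;&gt;&gt; optimizer = AdamW(pt_model.parameters(), lr=5e-5)
&gt;&gt;&gt; # hypothetical class indices for the two example sentences
&gt;&gt;&gt; labels = torch.tensor([4, 0])
&gt;&gt;&gt; # sequence classification models return a loss when labels are passed
&gt;&gt;&gt; outputs = pt_model(**pt_batch, labels=labels)
&gt;&gt;&gt; outputs.loss.backward()
&gt;&gt;&gt; optimizer.step()
&gt;&gt;&gt; optimizer.zero_grad()</code></pre>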
<div class="course-tip"><p>🤗 Transformers model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice or a string) in which case the attributes that are <code>None</code> are ignored.</p></div> <h3 class="relative group"><a id="save-a-model" class="header-link" href="#save-a-model"></a> <span>Save a model </span></h3> <p>Once your model is fine-tuned, you can save it with its tokenizer using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">PreTrainedModel.save_pretrained()</a>:</p> <pre><code>&gt;&gt;&gt; pt_save_directory = "./pt_save_pretrained"
&gt;&gt;&gt; tokenizer.save_pretrained(pt_save_directory)
&gt;&gt;&gt; pt_model.save_pretrained(pt_save_directory)</code></pre>
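<p>If you are curious what was written to disk, here is a short sketch assuming the <code>pt_save_directory</code> from above; the exact file names depend on the model and tokenizer classes, but you should at least see a configuration file, the model weights and the tokenizer files:</p> <pre><code>&gt;&gt;&gt; import os

&gt;&gt;&gt; # list the files produced by save_pretrained
&gt;&gt;&gt; print(sorted(os.listdir(pt_save_directory)))</code></pre>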
fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./pt_save_pretrained&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The <code>from_pt</code> or <code>from_tf</code> parameter can convert the model from one framework to the other:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="174b19k"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="174b19k"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/quicktour.mdx-8a4295b9.js") ], params: {} } }); </script>
89
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_summary.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;summary-of-the-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;decoders-or-autoregressive-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;original-gpt&quot;,&quot;title&quot;:&quot;Original GPT&quot;},{&quot;local&quot;:&quot;gpt2&quot;,&quot;title&quot;:&quot;GPT-2&quot;},{&quot;local&quot;:&quot;ctrl&quot;,&quot;title&quot;:&quot;CTRL&quot;},{&quot;local&quot;:&quot;transformerxl&quot;,&quot;title&quot;:&quot;Transformer-XL&quot;},{&quot;local&quot;:&quot;reformer&quot;,&quot;title&quot;:&quot;Reformer&quot;},{&quot;local&quot;:&quot;xlnet&quot;,&quot;title&quot;:&quot;XLNet&quot;}],&quot;title&quot;:&quot;Decoders or autoregressive models&quot;},{&quot;local&quot;:&quot;encoders-or-autoencoding-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;bert&quot;,&quot;title&quot;:&quot;BERT&quot;},{&quot;local&quot;:&quot;albert&quot;,&quot;title&quot;:&quot;ALBERT&quot;},{&quot;local&quot;:&quot;roberta&quot;,&quot;title&quot;:&quot;RoBERTa&quot;},{&quot;local&quot;:&quot;distilbert&quot;,&quot;title&quot;:&quot;DistilBERT&quot;},{&quot;local&quot;:&quot;convbert&quot;,&quot;title&quot;:&quot;ConvBERT&quot;},{&quot;local&quot;:&quot;xlm&quot;,&quot;title&quot;:&quot;XLM&quot;},{&quot;local&quot;:&quot;xlmroberta&quot;,&quot;title&quot;:&quot;XLM-RoBERTa&quot;},{&quot;local&quot;:&quot;flaubert&quot;,&quot;title&quot;:&quot;FlauBERT&quot;},{&quot;local&quot;:&quot;electra&quot;,&quot;title&quot;:&quot;ELECTRA&quot;},{&quot;local&quot;:&quot;funnel-transformer&quot;,&quot;title&quot;:&quot;Funnel Transformer&quot;},{&quot;local&quot;:&quot;longformer&quot;,&quot;title&quot;:&quot;Longformer&quot;}],&quot;title&quot;:&quot;Encoders or autoencoding models&quot;},{&quot;local&quot;:&quot;sequencetosequence-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;bart&quot;,&quot;title&quot;:&quot;BART&quot;},{&quot;local&quot;:&quot;pegasus&quot;,&quot;title&quot;:&quot;Pegasus&quot;},{&quot;local&quot;:&quot;marianmt&quot;,&quot;title&quot;:&quot;MarianMT&quot;},{&quot;local&quot;:&quot;t5&quot;,&quot;title&quot;:&quot;T5&quot;},{&quot;local&quot;:&quot;mt5&quot;,&quot;title&quot;:&quot;MT5&quot;},{&quot;local&quot;:&quot;mbart&quot;,&quot;title&quot;:&quot;MBart&quot;},{&quot;local&quot;:&quot;prophetnet&quot;,&quot;title&quot;:&quot;ProphetNet&quot;},{&quot;local&quot;:&quot;xlmprophetnet&quot;,&quot;title&quot;:&quot;XLM-ProphetNet&quot;}],&quot;title&quot;:&quot;Sequence-to-sequence models&quot;},{&quot;local&quot;:&quot;multimodal-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;mmbt&quot;,&quot;title&quot;:&quot;MMBT&quot;}],&quot;title&quot;:&quot;Multimodal models&quot;},{&quot;local&quot;:&quot;retrievalbased-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;dpr&quot;,&quot;title&quot;:&quot;DPR&quot;},{&quot;local&quot;:&quot;rag&quot;,&quot;title&quot;:&quot;RAG&quot;}],&quot;title&quot;:&quot;Retrieval-based models&quot;},{&quot;local&quot;:&quot;more-technical-aspects&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;full-vs-sparse-attention&quot;,&quot;title&quot;:&quot;Full vs sparse attention&quot;},{&quot;local&quot;:&quot;other-tricks&quot;,&quot;title&quot;:&quot;Other tricks&quot;}],&quot;title&quot;:&quot;More technical aspects&quot;}],&quot;title&quot;:&quot;Summary of the models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" 
href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_summary.mdx-4a71079d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="summary-of-the-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#summary-of-the-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Summary of the models </span></h1> <p>This is a summary of the models available in 🤗 Transformers. It assumes you’re familiar with the original <a href="https://arxiv.org/abs/1706.03762" rel="nofollow">transformer model</a>. For a gentle introduction check the <a href="http://nlp.seas.harvard.edu/2018/04/03/attention.html" rel="nofollow">annotated transformer</a>. Here we focus on the high-level differences between the models. You can check them more in detail in their respective documentation. Also check out <a href="https://huggingface.co/models" rel="nofollow">the Model Hub</a> where you can filter the checkpoints by model architecture.</p> <p>Each one of the models in the library falls into one of the following categories:</p> <ul><li><a href="#autoregressive-models">autoregressive-models</a></li> <li><a href="#autoencoding-models">autoencoding-models</a></li> <li><a href="#seq-to-seq-models">seq-to-seq-models</a></li> <li><a href="#multimodal-models">multimodal-models</a></li> <li><a href="#retrieval-based-models">retrieval-based-models</a></li></ul> <iframe width="560" height="315" src="https://www.youtube.com/embed/H39Z_720T5s" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Autoregressive models are pretrained on the classic language modeling task: guess the next token having read all the previous ones. 
They correspond to the decoder of the original transformer model, and a mask is used on top of the full sentence so that the attention heads can only see what was before in the text, and not what’s after. Although those models can be fine-tuned and achieve great results on many tasks, the most natural application is text generation. A typical example of such models is GPT.</p> <p>Autoencoding models are pretrained by corrupting the input tokens in some way and trying to reconstruct the original sentence. They correspond to the encoder of the original transformer model in the sense that they get access to the full inputs without any mask. Those models usually build a bidirectional representation of the whole sentence. They can be fine-tuned and achieve great results on many tasks such as text generation, but their most natural application is sentence classification or token classification. A typical example of such models is BERT.</p> <p>Note that the only difference between autoregressive models and autoencoding models is in the way the model is pretrained. Therefore, the same architecture can be used for both autoregressive and autoencoding models. When a given model has been used for both types of pretraining, we have put it in the category corresponding to the article where it was first introduced.</p> <p>Sequence-to-sequence models use both the encoder and the decoder of the original transformer, either for translation tasks or by transforming other tasks to sequence-to-sequence problems. They can be fine-tuned to many tasks but their most natural applications are translation, summarization and question answering. The original transformer model is an example of such a model (only for translation), T5 is an example that can be fine-tuned on other tasks.</p> <p>Multimodal models mix text inputs with other kinds (e.g. 
images) and are more specific to a given task.</p> <a id="autoregressive-models"></a> <h2 class="relative group"><a id="decoders-or-autoregressive-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#decoders-or-autoregressive-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Decoders or autoregressive models </span></h2> <p>As mentioned before, these models rely on the decoder part of the original transformer and use an attention mask so that at each position, the model can only look at the tokens before the attention heads.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/d_ixlCubqQw" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <h3 class="relative group"><a id="original-gpt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#original-gpt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Original GPT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=openai-gpt"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-openai--gpt-blueviolet"></a> <a href="model_doc/openai-gpt"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-openai--gpt-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/openai-gpt"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf" rel="nofollow">Improving Language Understanding by Generative Pre-Training</a>, Alec Radford et al.</p> <p>The first autoregressive model based on the transformer architecture, pretrained on the Book Corpus dataset.</p> <p>The library provides versions 
of the model for language modeling and multitask language modeling/multiple choice classification.</p> <h3 class="relative group"><a id="gpt2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gpt2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GPT-2 </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=gpt2"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-gpt2-blueviolet"></a> <a href="model_doc/gpt2"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-gpt2-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/gpt2"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf" rel="nofollow">Language Models are Unsupervised Multitask Learners</a>, Alec Radford et al.</p> <p>A bigger and better version of GPT, pretrained on WebText (web pages from outgoing links in Reddit with 3 karmas or more).</p> <p>The library provides versions of the model for language modeling and multitask language modeling/multiple choice classification.</p> <h3 class="relative group"><a id="ctrl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ctrl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CTRL </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=ctrl"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet"></a> <a href="model_doc/ctrl"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-ctrl-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/tiny-ctrl"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a 
href="https://arxiv.org/abs/1909.05858" rel="nofollow">CTRL: A Conditional Transformer Language Model for Controllable Generation</a>, Nitish Shirish Keskar et al.</p> <p>Same as the GPT model but adds the idea of control codes. Text is generated from a prompt (can be empty) and one (or several) of those control codes which are then used to influence the text generation: generate with the style of wikipedia article, a book or a movie review.</p> <p>The library provides a version of the model for language modeling only.</p> <h3 class="relative group"><a id="transformerxl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformerxl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Transformer-XL </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=transfo-xl"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet"></a> <a href="model_doc/transfo-xl"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-transfo--xl-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/transfo-xl-wt103"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1901.02860" rel="nofollow">Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context</a>, Zihang Dai et al.</p> <p>Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model.</p> <p>Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. 
By stacking multiple attention layers, the receptive field can be increased to multiple previous segments.</p> <p>This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed.</p> <p>The library provides a version of the model for language modeling only.</p> <a id="reformer"></a> <h3 class="relative group"><a id="reformer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#reformer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Reformer </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=reformer"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-reformer-blueviolet"></a> <a href="model_doc/reformer"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-reformer-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/reformer-crime-and-punishment"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2001.04451" rel="nofollow">Reformer: The Efficient Transformer</a>, Nikita Kitaev et al .</p> <p>An autoregressive transformer model with lots of tricks to reduce memory footprint and compute time. Those tricks include:</p> <ul><li>Use <a href="#axial-pos-encoding">Axial position encoding</a> (see below for more details). It’s a mechanism to avoid having a huge positional encoding matrix (when the sequence length is very big) by factorizing it into smaller matrices.</li> <li>Replace traditional attention by <a href="#lsh-attention">LSH (local-sensitive hashing) attention</a> (see below for more details). 
It’s a technique to avoid computing the full product query-key in the attention layers.</li> <li>Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them for results inside a given layer (less efficient than storing them but saves memory).</li> <li>Compute the feedforward operations by chunks and not on the whole batch.</li></ul> <p>With those tricks, the model can be fed much larger sentences than traditional transformer autoregressive models.</p> <div class="course-tip"><p>This model could very well be used in an autoencoding setting; there is no checkpoint for such a pretraining yet, though.</p></div> <p>The library provides a version of the model for language modeling only.</p> <h3 class="relative group"><a id="xlnet" class="header-link" href="#xlnet"></a> <span>XLNet </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=xlnet"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlnet-blueviolet"></a> <a href="model_doc/xlnet"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlnet-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/xlnet-base-cased"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1906.08237" rel="nofollow">XLNet: Generalized Autoregressive Pretraining for Language Understanding</a>, Zhilin Yang et al.</p> <p>XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1.
Since this is all done with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,…,sequence length.</p> <p>XLNet also uses the same recurrence mechanism as Transformer-XL to build long-term dependencies.</p> <p>The library provides a version of the model for language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <a id="autoencoding-models"></a> <h2 class="relative group"><a id="encoders-or-autoencoding-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#encoders-or-autoencoding-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Encoders or autoencoding models </span></h2> <p>As mentioned before, these models rely on the encoder part of the original transformer and use no mask so the model can look at all the tokens in the attention heads. 
For pretraining, targets are the original sentences and inputs are their corrupted versions.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/MUqNwgPjJvQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <h3 class="relative group"><a id="bert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BERT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=bert"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet"></a> <a href="model_doc/bert"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-bert-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/bert-base-uncased"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1810.04805" rel="nofollow">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a>, Jacob Devlin et al.</p> <p>Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by:</p> <ul><li>a special mask token with probability 0.8</li> <li>a random token different from the one masked with probability 0.1</li> <li>the same token with probability 0.1</li></ul> <p>The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. 
The model has to predict if the sentences are consecutive or not.</p> <p>The library provides a version of the model for language modeling (traditional or masked), next sentence prediction, token classification, sentence classification, multiple choice classification and question answering.</p> <h3 class="relative group"><a id="albert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#albert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ALBERT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=albert"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet"></a> <a href="model_doc/albert"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-albert-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/albert-base-v2"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1909.11942" rel="nofollow">ALBERT: A Lite BERT for Self-supervised Learning of Language Representations</a>, Zhenzhong Lan et al.</p> <p>Same as BERT but with a few tweaks:</p> <ul><li>Embedding size E is different from hidden size H justified because the embeddings are context independent (one embedding vector represents one token), whereas hidden states are context dependent (one hidden state represents a sequence of tokens) so it’s more logical to have H &gt;&gt; E. Also, the embedding matrix is large since it’s V x E (V being the vocab size). If E &lt; H, it has less parameters.</li> <li>Layers are split in groups that share parameters (to save memory).</li> <li>Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. 
The model must predict if they have been swapped or not.</li></ul> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <h3 class="relative group"><a id="roberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#roberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RoBERTa </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=roberta"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-roberta-blueviolet"></a> <a href="model_doc/roberta"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-roberta-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/roberta-base"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1907.11692" rel="nofollow">RoBERTa: A Robustly Optimized BERT Pretraining Approach</a>, Yinhan Liu et al.</p> <p>Same as BERT with better pretraining tricks:</p> <ul><li>dynamic masking: tokens are masked differently at each epoch, whereas BERT does it once and for all</li> <li>no NSP (next sentence prediction) loss and instead of putting just two sentences together, put a chunk of contiguous texts together to reach 512 tokens (so the sentences are in an order than may span several documents)</li> <li>train with larger batches</li> <li>use BPE with bytes as a subunit and not characters (because of unicode characters)</li></ul> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <h3 class="relative group"><a id="distilbert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#distilbert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span>DistilBERT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=distilbert"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-distilbert-blueviolet"></a> <a href="model_doc/distilbert"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-distilbert-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/distilbert-base-uncased"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1910.01108" rel="nofollow">DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter</a>, Victor Sanh et al.</p> <p>Same as BERT but smaller. Trained by distillation of the pretrained BERT model, meaning it’s been trained to predict the same probabilities as the larger model. The actual objective is a combination of:</p> <ul><li>finding the same probabilities as the teacher model</li> <li>predicting the masked tokens correctly (but no next-sentence objective)</li> <li>a cosine similarity between the hidden states of the student and the teacher model</li></ul> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.</p> <h3 class="relative group"><a id="convbert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convbert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConvBERT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=convbert"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet"></a> <a href="model_doc/convbert"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-convbert-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/conv-bert-base"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2008.02496" rel="nofollow">ConvBERT: Improving BERT with Span-based Dynamic Convolution</a>, Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.</p> <p>Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. 
Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost.</p> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification and question answering.</p> <h3 class="relative group"><a id="xlm" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=xlm"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm-blueviolet"></a> <a href="model_doc/xlm"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlm-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/xlm-mlm-en-2048"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1901.07291" rel="nofollow">Cross-lingual Language Model Pretraining</a>, Guillaume Lample and Alexis Conneau</p> <p>A transformer model trained on several languages. There are three different type of training for this model and the library provides checkpoints for all of them:</p> <ul><li>Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the previous section as well). One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages.</li> <li>Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens, that may span over several documents in one of those languages, with dynamic masking of the tokens.</li> <li>A combination of MLM and translation language modeling (TLM). 
This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both, the surrounding context in language 1 and the context given by language 2.</li></ul> <p>Checkpoints refer to which method was used for pretraining by having <em>clm</em>, <em>mlm</em> or <em>mlm-tlm</em> in their names. On top of positional embeddings, the model has language embeddings. When training using MLM/CLM, this gives the model an indication of the language used, and when training using MLM+TLM, an indication of the language used for each part.</p> <p>The library provides a version of the model for language modeling, token classification, sentence classification and question answering.</p> <h3 class="relative group"><a id="xlmroberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlmroberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM-RoBERTa </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=xlm-roberta"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm--roberta-blueviolet"></a> <a href="model_doc/xlm-roberta"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlm--roberta-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/xlm-roberta-base"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1911.02116" rel="nofollow">Unsupervised Cross-lingual Representation Learning at Scale</a>, Alexis Conneau et al.</p> <p>Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective. It only uses masked language modeling on sentences coming from one language. 
However, the model is trained on many more languages (100) and doesn’t use the language embeddings, so it’s capable of detecting the input language by itself.</p> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <h3 class="relative group"><a id="flaubert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#flaubert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlauBERT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=flaubert"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-flaubert-blueviolet"></a> <a href="model_doc/flaubert"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-flaubert-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/flaubert_small_cased"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1912.05372" rel="nofollow">FlauBERT: Unsupervised Language Model Pre-training for French</a>, Hang Le et al.</p> <p>Like RoBERTa, without the sentence ordering prediction (so just trained on the MLM objective).</p> <p>The library provides a version of the model for language modeling and sentence classification.</p> <h3 class="relative group"><a id="electra" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#electra"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ELECTRA </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=electra"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-electra-blueviolet"></a> <a href="model_doc/electra"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-electra-blueviolet"></a> <a 
href="https://huggingface.co/spaces/docs-demos/electra_large_discriminator_squad2_512"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2003.10555" rel="nofollow">ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators</a>, Kevin Clark et al.</p> <p>ELECTRA is a transformer model pretrained with the use of another (small) masked language model. The inputs are corrupted by that language model, which takes an input text that is randomly masked and outputs a text in which ELECTRA has to predict which token is an original and which one has been replaced. Like for GAN training, the small language model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a traditional GAN setting) then the ELECTRA model is trained for a few steps.</p> <p>The library provides a version of the model for masked language modeling, token classification and sentence classification.</p> <h3 class="relative group"><a id="funnel-transformer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#funnel-transformer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Funnel Transformer </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=funnel"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-funnel-blueviolet"></a> <a href="model_doc/funnel"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-funnel-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/funnel-transformer-small"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2006.03236" rel="nofollow">Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing</a>, Zihang Dai et al.</p> <p>Funnel Transformer is a transformer model using pooling, a bit like a ResNet model: layers are grouped in blocks, and at the beginning of each block (except the first one), the hidden states are pooled among the sequence dimension. This way, their length is divided by 2, which speeds up the computation of the next hidden states. All pretrained models have three blocks, which means the final hidden state has a sequence length that is one fourth of the original sequence length.</p> <p>For tasks such as classification, this is not a problem, but for tasks like masked language modeling or token classification, we need a hidden state with the same sequence length as the original input. 
In those cases, the final hidden states are upsampled to the input sequence length and go through two additional layers. That’s why there are two versions of each checkpoint. The version suffixed with “-base” contains only the three blocks, while the version without that suffix contains the three blocks and the upsampling head with its additional layers.</p> <p>The pretrained models available use the same pretraining objective as ELECTRA.</p> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <a id="longformer"></a> <h3 class="relative group"><a id="longformer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#longformer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Longformer </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=longformer"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-longformer-blueviolet"></a> <a href="model_doc/longformer"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-longformer-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/longformer-base-4096-finetuned-squadv1"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a>, Iz Beltagy et al.</p> <p>A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g., what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are still given global attention, but the attention matrix has way less parameters, resulting in a speed-up. 
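</p> <p>A hedged sketch of how this is exposed in practice (assuming the <code>allenai/longformer-base-4096</code> checkpoint): most tokens attend locally, and a <code>global_attention_mask</code> flags the few positions that attend to, and are attended by, the whole sequence.</p> <pre><code class="language-python">import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

inputs = tokenizer("A long document " * 500, return_tensors="pt")
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1  # give the first token global attention

outputs = model(**inputs, global_attention_mask=global_attention_mask)</code></pre> <p>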
See the <a href="#local-attention">local attention section</a> for more information.</p> <p>It is pretrained the same way a RoBERTa otherwise.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This model could be very well be used in an autoregressive setting, there is no checkpoint for such a pretraining yet, though.</p></div> <p>The library provides a version of the model for masked language modeling, token classification, sentence classification, multiple choice classification and question answering.</p> <a id="seq-to-seq-models"></a> <h2 class="relative group"><a id="sequencetosequence-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#sequencetosequence-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Sequence-to-sequence models </span></h2> <p>As mentioned before, these models keep both the encoder and the decoder of the original transformer.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/0_4KEb08xrE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <h3 class="relative group"><a id="bart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BART </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=bart"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-bart-blueviolet"></a> <a href="model_doc/bart"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-bart-blueviolet"></a> <a 
href="https://huggingface.co/spaces/docs-demos/bart-large-mnli"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1910.13461" rel="nofollow">BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension</a>, Mike Lewis et al.</p> <p>Sequence-to-sequence model with an encoder and a decoder. Encoder is fed a corrupted version of the tokens, decoder is fed the original tokens (but has a mask to hide the future words like a regular transformers decoder). A composition of the following transformations are applied on the pretraining tasks for the encoder:</p> <ul><li>mask random tokens (like in BERT)</li> <li>delete random tokens</li> <li>mask a span of k tokens with a single mask token (a span of 0 tokens is an insertion of a mask token)</li> <li>permute sentences</li> <li>rotate the document to make it start at a specific token</li></ul> <p>The library provides a version of this model for conditional generation and sequence classification.</p> <h3 class="relative group"><a id="pegasus" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pegasus"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pegasus </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=pegasus"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-pegasus-blueviolet"></a> <a href="model_doc/pegasus"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-pegasus-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/pegasus_paraphrase"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/pdf/1912.08777.pdf" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences forAbstractive Summarization</a>, Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019.</p> <p>Sequence-to-sequence model with the same encoder-decoder model architecture as BART. 
Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization-specific pretraining objective, called Gap Sentence Generation (GSG).</p> <ul><li>MLM: encoder input tokens are randomly replaced by a mask token and have to be predicted by the encoder (like in BERT)</li> <li>GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, which has a causal mask to hide the future words like a regular auto-regressive transformer decoder.</li></ul> <p>In contrast to BART, Pegasus’ pretraining task is intentionally similar to summarization: important sentences are masked and are generated together as one output sequence from the remaining sentences, similar to an extractive summary.</p> <p>The library provides a version of this model for conditional generation, which should be used for summarization.</p> <h3 class="relative group"><a id="marianmt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#marianmt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MarianMT </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=marian"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-marian-blueviolet"></a> <a href="model_doc/marian"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-marian-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/opus-mt-zh-en"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1804.00344" rel="nofollow">Marian: Fast Neural Machine Translation in C++</a>, Marcin Junczys-Dowmunt et al.</p> <p>A framework for translation models, using the same models as BART.</p> <p>The library provides a version of this model for conditional generation.</p> <h3 class="relative group"><a id="t5" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#t5"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>T5 </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=t5"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-t5-blueviolet"></a> <a href="model_doc/t5"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-t5-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/t5-base"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/1910.10683" rel="nofollow">Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer</a>, Colin Raffel et al.</p> <p>Uses the traditional transformer model (with a slight change in the positional embeddings, which are learned at each layer). To be able to operate on all NLP tasks, it transforms them into text-to-text problems by using specific prefixes: “summarize: ”, “question: ”, “translate English to German: ” and so forth.</p> <p>The pretraining includes both supervised and self-supervised training. Supervised training is conducted on downstream tasks provided by the GLUE and SuperGLUE benchmarks (converting them into text-to-text tasks as explained above).</p> <p>Self-supervised training uses corrupted tokens, by randomly removing 15% of the tokens and replacing them with individual sentinel tokens (if several consecutive tokens are marked for removal, the whole group is replaced with a single sentinel token). The input of the encoder is the corrupted sentence, the input of the decoder is the original sentence and the target is then the dropped out tokens delimited by their sentinel tokens.</p> <p>For instance, if we have the sentence “My dog is very cute .”, and we decide to remove the tokens: “dog”, “is” and “cute”, the encoder input becomes “My &lt;x&gt; very &lt;y&gt; .” and the target input becomes “&lt;x&gt; dog is &lt;y&gt; cute .&lt;z&gt;”</p> <p>The library provides a version of this model for conditional generation.</p> <h3 class="relative group"><a id="mt5" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mt5"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MT5 </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=mt5"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-mt5-blueviolet"></a> <a href="model_doc/mt5"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-mt5-blueviolet"></a> <a 
href="https://huggingface.co/spaces/docs-demos/mt5-small-finetuned-arxiv-cs-finetuned-arxiv-cs-full"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2010.11934" rel="nofollow">mT5: A massively multilingual pre-trained text-to-text transformer</a>, Linting Xue et al.</p> <p>The model architecture is same as T5. mT5’s pretraining objective includes T5’s self-supervised training, but not T5’s supervised training. mT5 is trained on 101 languages.</p> <p>The library provides a version of this model for conditional generation.</p> <h3 class="relative group"><a id="mbart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mbart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=mbart"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-mbart-blueviolet"></a> <a href="model_doc/mbart"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-mbart-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/mbart-large-50-one-to-many-mmt"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2001.08210" rel="nofollow">Multilingual Denoising Pre-training for Neural Machine Translation</a> by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.</p> <p>The model architecture and pretraining objective is same as BART, but MBart is trained on 25 languages and is intended for supervised and unsupervised machine translation. 
MBart is one of the first methods for pretraining a complete sequence-to-sequence model by denoising full texts in multiple languages.</p> <p>The library provides a version of this model for conditional generation.</p> <p>The <a href="https://huggingface.co/facebook/mbart-large-en-ro" rel="nofollow">mbart-large-en-ro checkpoint</a> can be used for English -&gt; Romanian translation.</p> <p>The <a href="https://huggingface.co/facebook/mbart-large-cc25" rel="nofollow">mbart-large-cc25</a> checkpoint can be finetuned for other translation and summarization tasks, using code in <code>examples/pytorch/translation/</code>, but is not very useful without finetuning.</p> <h3 class="relative group"><a id="prophetnet" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prophetnet"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ProphetNet </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=prophetnet"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-prophetnet-blueviolet"></a> <a href="model_doc/prophetnet"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-prophetnet-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/prophetnet-large-uncased"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.</p> <p>ProphetNet introduces a novel <em>sequence-to-sequence</em> pretraining objective, called <em>future n-gram prediction</em>. In future n-gram prediction, the model predicts the next n tokens simultaneously based on previous context tokens at each time step instead of just the single next token. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. 
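</p> <p>An illustrative sketch (not the actual training code) of what <em>future n-gram prediction</em> targets look like for n = 2: at every position, the model is asked for the next two tokens rather than only the next one.</p> <pre><code class="language-python">tokens = ["The", "cat", "sat", "on", "the", "mat"]
n = 2
# at position i, the targets are the n tokens that follow token i
targets = [tokens[i + 1 : i + 1 + n] for i in range(len(tokens) - 1)]
# targets[0] == ["cat", "sat"], targets[1] == ["sat", "on"], ...</code></pre> <p>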
The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism.</p> <p>The library provides a pre-trained version of this model for conditional generation and a fine-tuned version for summarization.</p> <h3 class="relative group"><a id="xlmprophetnet" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlmprophetnet"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM-ProphetNet </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=xprophetnet"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-xprophetnet-blueviolet"></a> <a href="model_doc/xlm-prophetnet"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xprophetnet-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/xprophetnet-large-wiki100-cased-xglue-ntg"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou.</p> <p>XLM-ProphetNet’s model architecture and pretraining objective are the same as ProphetNet’s, but XLM-ProphetNet was pre-trained on the cross-lingual dataset <a href="https://arxiv.org/abs/2004.01401" rel="nofollow">XGLUE</a>.</p> <p>The library provides a pre-trained version of this model for multi-lingual conditional generation and fine-tuned versions for headline generation and question generation, respectively.</p> <a id="multimodal-models"></a> <h2 class="relative group"><a id="multimodal-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multimodal-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multimodal models </span></h2> <p>There is one multimodal model in the library which has not been pretrained in the self-supervised fashion like the others.</p> <h3 class="relative group"><a id="mmbt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mmbt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MMBT </span></h3> <p><a href="https://arxiv.org/abs/1909.02950" rel="nofollow">Supervised Multimodal Bitransformers for Classifying Images and Text</a>, Douwe Kiela et al.</p> <p>A transformers model used in multimodal settings, combining a text and an image to make predictions. The transformer model takes as inputs the embeddings of the tokenized text and the final activations of a pretrained on images resnet (after the pooling layer) that goes through a linear layer (to go from number of features at the end of the resnet to the hidden state dimension of the transformer).</p> <p>The different inputs are concatenated, and on top of the positional embeddings, a segment embedding is added to let the model know which part of the input vector corresponds to the text and which to the image.</p> <p>The pretrained model only works for classification.</p> <a id="retrieval-based-models"></a> <h2 class="relative group"><a id="retrievalbased-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#retrievalbased-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Retrieval-based models </span></h2> <p>Some models use documents retrieval during (pre)training and inference for open-domain question answering, for example.</p> <h3 class="relative group"><a id="dpr" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#dpr"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DPR </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=dpr"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-dpr-blueviolet"></a> <a href="model_doc/dpr"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-dpr-blueviolet"></a> <a href="https://huggingface.co/spaces/docs-demos/dpr-question_encoder-bert-base-multilingual"><img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"></a></div> <p><a href="https://arxiv.org/abs/2004.04906" rel="nofollow">Dense Passage Retrieval for Open-Domain Question Answering</a>, Vladimir Karpukhin et al.</p> <p>Dense Passage Retrieval (DPR) - is a set of tools and models for state-of-the-art open-domain question-answering research.</p> <p>DPR consists in three models:</p> <ul><li>Question encoder: encode questions as vectors</li> <li>Context encoder: encode contexts as vectors</li> <li>Reader: extract the answer of the questions inside retrieved contexts, along with a relevance score (high if the inferred span actually answers the question).</li></ul> <p>DPR’s pipeline (not implemented yet) uses a retrieval step to find the top k contexts given a certain question, and then it calls the reader with the question and the retrieved documents to get the answer.</p> <h3 class="relative group"><a id="rag" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#rag"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>RAG </span></h3> <div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/models?filter=rag"><img alt="Models" src="https://img.shields.io/badge/All_model_pages-rag-blueviolet"></a> <a href="model_doc/rag"><img alt="Doc" src="https://img.shields.io/badge/Model_documentation-rag-blueviolet"></a></div> <p><a href="https://arxiv.org/abs/2005.11401" rel="nofollow">Retrieval-Augmented Generation for 
Knowledge-Intensive NLP Tasks</a>, Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela</p> <p>Retrieval-augmented generation (“RAG”) models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks.</p> <p>The two models RAG-Token and RAG-Sequence are available for generation.</p> <h2 class="relative group"><a id="more-technical-aspects" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#more-technical-aspects"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>More technical aspects </span></h2> <h3 class="relative group"><a id="full-vs-sparse-attention" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#full-vs-sparse-attention"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Full vs sparse attention </span></h3> <p>Most transformer models use full attention in the sense that the attention matrix is square. It can be a big computational bottleneck when you have long texts. Longformer and reformer are models that try to be more efficient and use a sparse version of the attention matrix to speed up training.</p> <a id="lsh-attention"></a> <p><strong>LSH attention</strong></p> <p><a href="#reformer">Reformer</a> uses LSH attention. In the softmax(QK^t), only the biggest elements (in the softmax dimension) of the matrix QK^t are going to give useful contributions. So for each query q in Q, we can consider only the keys k in K that are close to q. A hash function is used to determine if q and k are close. 
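</p> <p>A toy sketch of such a hash (an illustration of angular LSH, not Reformer’s actual implementation): random rotations are applied to the shared query/key vectors, and vectors that end up with the same argmax fall into the same bucket. Running several rounds (the n_rounds parameter mentioned below) reduces the chance that two close vectors get separated.</p> <pre><code class="language-python">import torch

def lsh_buckets(vectors, n_buckets, n_rounds=4):
    # one random rotation per hashing round
    d = vectors.shape[-1]
    rotations = torch.randn(n_rounds, d, n_buckets // 2)
    rotated = torch.einsum("ld,rdb-&gt;rlb", vectors, rotations)
    # classic angular-LSH trick: argmax over the concatenation of [rotated, -rotated]
    return torch.cat([rotated, -rotated], dim=-1).argmax(dim=-1)  # (n_rounds, seq_len)

vectors = torch.randn(16, 64)  # 16 tokens in a shared query/key space of size 64
buckets = lsh_buckets(vectors, n_buckets=8)</code></pre> <p>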
The attention mask is modified to mask the current token (except at the first position), because it will give a query and a key equal (so very similar to each other). Since the hash can be a bit random, several hash functions are used in practice (determined by a n_rounds parameter) and then are averaged together.</p> <a id="local-attention"></a> <p><strong>Local attention</strong></p> <p><a href="#longformer">Longformer</a> uses local attention: often, the local context (e.g., what are the two tokens to the left and right?) is enough to take action for a given token. Also, by stacking attention layers that have a small window, the last layer will have a receptive field of more than just the tokens in the window, allowing them to build a representation of the whole sentence.</p> <p>Some preselected input tokens are also given global attention: for those few tokens, the attention matrix can access all tokens and this process is symmetric: all other tokens have access to those specific tokens (on top of the ones in their local window). This is shown in Figure 2d of the paper, see below for a sample attention mask:</p> <img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"> <p>Using those attention matrices with less parameters then allows the model to have inputs having a bigger sequence length.</p> <h3 class="relative group"><a id="other-tricks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#other-tricks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Other tricks </span></h3> <a id="axial-pos-encoding"></a> <p><strong>Axial positional encodings</strong></p> <p><a href="#reformer">Reformer</a> uses axial positional encodings: in traditional transformer models, the positional encoding E is a matrix of size <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>l</mi></mrow><annotation encoding="application/x-tex">l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span><!-- HTML_TAG_END --> by <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi></mrow><annotation encoding="application/x-tex">d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord 
mathnormal">d</span></span></span></span><!-- HTML_TAG_END -->, <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>l</mi></mrow><annotation encoding="application/x-tex">l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span><!-- HTML_TAG_END --> being the sequence length and <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>d</mi></mrow><annotation encoding="application/x-tex">d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span><!-- HTML_TAG_END --> the dimension of the hidden state. If you have very long texts, this matrix can be huge and take way too much space on the GPU. To alleviate that, axial positional encodings consist of factorizing that big matrix E in two smaller matrices E1 and E2, with dimensions <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>1</mn></msub><mo>×</mo><msub><mi>d</mi><mn>1</mn></msub></mrow><annotation encoding="application/x-tex">l_{1} \times d_{1}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">×</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span><!-- HTML_TAG_END --> and <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>2</mn></msub><mo>×</mo><msub><mi>d</mi><mn>2</mn></msub></mrow><annotation encoding="application/x-tex">l_{2} \times d_{2}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span 
class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">×</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span><!-- HTML_TAG_END -->, such that <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>l</mi><mn>1</mn></msub><mo>×</mo><msub><mi>l</mi><mn>2</mn></msub><mo>=</mo><mi>l</mi></mrow><annotation encoding="application/x-tex">l_{1} \times l_{2} = l</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">×</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:-0.0197em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" 
style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span></span></span></span><!-- HTML_TAG_END --> and <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>d</mi><mn>1</mn></msub><mo>+</mo><msub><mi>d</mi><mn>2</mn></msub><mo>=</mo><mi>d</mi></mrow><annotation encoding="application/x-tex">d_{1} + d_{2} = d</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.8444em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">d</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">2</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal">d</span></span></span></span><!-- HTML_TAG_END --> (with the product for the lengths, this ends up being way smaller). 
The embedding for time step <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi></mrow><annotation encoding="application/x-tex">j</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.854em;vertical-align:-0.1944em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span></span></span></span><!-- HTML_TAG_END --> in E is obtained by concatenating the embeddings for timestep <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi><mi mathvariant="normal">%</mi><mi>l</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">j \% l1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.9444em;vertical-align:-0.1944em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mord">%</span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="mord">1</span></span></span></span><!-- HTML_TAG_END --> in E1 and <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>j</mi><mi mathvariant="normal">/</mi><mi mathvariant="normal">/</mi><mi>l</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">j // l1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathnormal" style="margin-right:0.05724em;">j</span><span class="mord">//</span><span class="mord mathnormal" style="margin-right:0.01968em;">l</span><span class="mord">1</span></span></span></span><!-- HTML_TAG_END --> in E2.</p> <script type="module" data-hydrate="a3m285"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="a3m285"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_summary.mdx-4a71079d.js") ], params: {} } }); </script>
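<p>As a rough illustration (a minimal NumPy sketch, not the actual Reformer implementation; the sizes below are made-up examples), the full d-dimensional embedding of a position j can be reassembled from the two factorized tables like this:</p> <pre>import numpy as np

l1, l2 = 64, 16   # factorized sequence lengths, l = l1 * l2 = 1024
d1, d2 = 96, 32   # factorized hidden sizes,     d = d1 + d2 = 128

E1 = np.random.randn(l1, d1)  # small table indexed by j % l1
E2 = np.random.randn(l2, d2)  # small table indexed by j // l1

def axial_position_embedding(j):
    """Recover the d-dimensional embedding of position j from E1 and E2."""
    return np.concatenate([E1[j % l1], E2[j // l1]])

print(axial_position_embedding(1000).shape)  # (128,) without storing a full (1024, 128) table
</pre>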
90
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/serialization.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;exporting-transformers-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;onnx&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;exporting-a-model-to-onnx&quot;,&quot;title&quot;:&quot;Exporting a model to ONNX&quot;},{&quot;local&quot;:&quot;selecting-features-for-different-model-topologies&quot;,&quot;title&quot;:&quot;Selecting features for different model topologies&quot;},{&quot;local&quot;:&quot;exporting-a-model-for-an-unsupported-architecture&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;implementing-a-custom-onnx-configuration&quot;,&quot;title&quot;:&quot;Implementing a custom ONNX configuration&quot;},{&quot;local&quot;:&quot;exporting-the-model&quot;,&quot;title&quot;:&quot;Exporting the model&quot;},{&quot;local&quot;:&quot;validating-the-model-outputs&quot;,&quot;title&quot;:&quot;Validating the model outputs&quot;}],&quot;title&quot;:&quot;Exporting a model for an unsupported architecture&quot;},{&quot;local&quot;:&quot;contributing-a-new-configuration-to-transformers&quot;,&quot;title&quot;:&quot;Contributing a new configuration to 🤗 Transformers&quot;}],&quot;title&quot;:&quot;ONNX&quot;},{&quot;local&quot;:&quot;torchscript&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;implications&quot;,&quot;title&quot;:&quot;Implications&quot;},{&quot;local&quot;:&quot;torchscript-flag-and-tied-weights&quot;,&quot;title&quot;:&quot;TorchScript flag and tied weights&quot;},{&quot;local&quot;:&quot;dummy-inputs-and-standard-lengths&quot;,&quot;title&quot;:&quot;Dummy inputs and standard lengths&quot;},{&quot;local&quot;:&quot;using-torchscript-in-python&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;saving-a-model&quot;,&quot;title&quot;:&quot;Saving a model&quot;},{&quot;local&quot;:&quot;loading-a-model&quot;,&quot;title&quot;:&quot;Loading a model&quot;},{&quot;local&quot;:&quot;using-a-traced-model-for-inference&quot;,&quot;title&quot;:&quot;Using a traced model for inference&quot;}],&quot;title&quot;:&quot;Using TorchScript in Python&quot;},{&quot;local&quot;:&quot;deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;implications&quot;,&quot;title&quot;:&quot;Implications&quot;},{&quot;local&quot;:&quot;dependencies&quot;,&quot;title&quot;:&quot;Dependencies&quot;},{&quot;local&quot;:&quot;converting-a-model-for-aws-neuron&quot;,&quot;title&quot;:&quot;Converting a Model for AWS Neuron&quot;}],&quot;title&quot;:&quot;Deploying HuggingFace TorchScript models on AWS using the Neuron SDK&quot;}],&quot;title&quot;:&quot;TorchScript&quot;}],&quot;title&quot;:&quot;Exporting 🤗 Transformers Models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/serialization.mdx-809e82fb.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="exporting-transformers-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-transformers-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting 🤗 Transformers Models </span></h1> <p>If you need to deploy 🤗 Transformers models in production environments, we recommend exporting them to a serialized format that can be loaded and executed on specialized runtimes and hardware. In this guide, we’ll show you how to export 🤗 Transformers models in two widely used formats: ONNX and TorchScript.</p> <p>Once exported, a model can optimized for inference via techniques such as quantization and pruning. If you are interested in optimizing your models to run with maximum efficiency, check out the <a href="https://github.com/huggingface/optimum" rel="nofollow">🤗 Optimum library</a>.</p> <h2 class="relative group"><a id="onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX </span></h2> <p>The <a href="http://onnx.ai" rel="nofollow">ONNX (Open Neural Network eXchange)</a> project is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. 
When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an <em>intermediate representation</em>) which represents the flow of data through the neural network.</p> <p>By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa).</p> <p>🤗 Transformers provides a <code>transformers.onnx</code> package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. These configuration objects come ready made for a number of model architectures, and are designed to be easily extendable to other architectures.</p> <p>Ready-made configurations include the following architectures:</p> <ul><li>ALBERT</li> <li>BART</li> <li>BERT</li> <li>CamemBERT</li> <li>Data2VecText</li> <li>DistilBERT</li> <li>ELECTRA</li> <li>GPT Neo</li> <li>I-BERT</li> <li>LayoutLM</li> <li>M2M100</li> <li>Marian</li> <li>mBART</li> <li>OpenAI GPT-2</li> <li>PLBart</li> <li>RoBERTa</li> <li>T5</li> <li>ViT</li> <li>XLM-RoBERTa</li> <li>XLM-RoBERTa-XL</li></ul> <p>In the next two sections, we’ll show you how to:</p> <ul><li>Export a supported model using the <code>transformers.onnx</code> package.</li> <li>Export a custom model for an unsupported architecture.</li></ul> <h3 class="relative group"><a id="exporting-a-model-to-onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-a-model-to-onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting a model to ONNX </span></h3> <p>To export a 🤗 Transformers model to ONNX, you’ll first need to install some extra dependencies:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 
opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[onnx]<!-- HTML_TAG_END --></pre></div> <p>The <code>transformers.onnx</code> package can then be used as a Python module:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m transformers.onnx --<span class="hljs-built_in">help</span> usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output positional arguments: output Path indicating <span class="hljs-built_in">where</span> to store generated ONNX model. optional arguments: -h, --<span class="hljs-built_in">help</span> show this <span class="hljs-built_in">help</span> message and <span class="hljs-built_in">exit</span> -m MODEL, --model MODEL Model ID on huggingface.co or path on disk to load model from. --feature {causal-lm, ...} The <span class="hljs-built_in">type</span> of features to <span class="hljs-built_in">export</span> the model with. --opset OPSET ONNX opset version to <span class="hljs-built_in">export</span> the model with. 
--atol ATOL Absolute difference tolerence when validating the model.<!-- HTML_TAG_END --></pre></div> <p>Exporting a checkpoint using a ready-made configuration can be done as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m transformers.onnx --model=distilbert-base-uncased onnx/<!-- HTML_TAG_END --></pre></div> <p>which should show the following logs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Validating ONNX model... -[✓] ONNX model output names match reference model ({<span class="hljs-string">&#x27;last_hidden_state&#x27;</span>}) - Validating ONNX Model output <span class="hljs-string">&quot;last_hidden_state&quot;</span>: -[✓] (2, 8, 768) matches (2, 8, 768) -[✓] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx<!-- HTML_TAG_END --></pre></div> <p>This exports an ONNX graph of the checkpoint defined by the <code>--model</code> argument. 
In this example it is <code>distilbert-base-uncased</code>, but it can be any checkpoint on the Hugging Face Hub or one that’s stored locally.</p> <p>The resulting <code>model.onnx</code> file can then be run on one of the <a href="https://onnx.ai/supported-tools.html#deployModel" rel="nofollow">many accelerators</a> that support the ONNX standard. For example, we can load and run the model with <a href="https://onnxruntime.ai/" rel="nofollow">ONNX Runtime</a> as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> onnxruntime <span class="hljs-keyword">import</span> InferenceSession <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>session = InferenceSession(<span class="hljs-string">&quot;onnx/model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># ONNX Runtime expects NumPy arrays as input</span> <span class="hljs-meta">&gt;&gt;&gt; </span>inputs = tokenizer(<span class="hljs-string">&quot;Using DistilBERT with ONNX Runtime!&quot;</span>, return_tensors=<span class="hljs-string">&quot;np&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = session.run(output_names=[<span class="hljs-string">&quot;last_hidden_state&quot;</span>], input_feed=<span class="hljs-built_in">dict</span>(inputs))<!-- HTML_TAG_END --></pre></div> <p>The required output names (i.e. <code>[&quot;last_hidden_state&quot;]</code>) can be obtained by taking a look at the ONNX configuration of each model. 
For example, for DistilBERT we have:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.models.distilbert <span class="hljs-keyword">import</span> DistilBertConfig, DistilBertOnnxConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = DistilBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config = DistilBertOnnxConfig(config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(<span class="hljs-built_in">list</span>(onnx_config.outputs.keys())) [<span class="hljs-string">&quot;last_hidden_state&quot;</span>]<!-- HTML_TAG_END --></pre></div> <p>The process is identical for TensorFlow checkpoints on the Hub. For example, we can export a pure TensorFlow checkpoint from the <a href="https://huggingface.co/keras-io" rel="nofollow">Keras organization</a> as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m transformers.onnx --model=keras-io/transformers-qa onnx/<!-- HTML_TAG_END --></pre></div> <p>To export a model that’s stored locally, you’ll need to have the model’s weights and tokenizer files stored in a directory. 
For example, we can load and save a checkpoint as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Load tokenizer and PyTorch weights form the Hub</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Save to disk</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;local-pt-checkpoint&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(<span class="hljs-string">&quot;local-pt-checkpoint&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Once the checkpoint is saved, we can export it to ONNX by pointing the <code>--model</code> argument of the <code>transformers.onnx</code> package to the desired directory:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 
105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START -->python -m transformers.onnx --model=local-pt-checkpoint onnx/<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="selecting-features-for-different-model-topologies" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#selecting-features-for-different-model-topologies"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Selecting features for different model topologies </span></h3> <p>Each ready-made configuration comes with a set of <em>features</em> that enable you to export models for different types of topologies or tasks. As shown in the table below, each feature is associated with a different auto class:</p> <table><thead><tr><th>Feature</th> <th>Auto Class</th></tr></thead> <tbody><tr><td><code>causal-lm</code>, <code>causal-lm-with-past</code></td> <td><code>AutoModelForCausalLM</code></td></tr> <tr><td><code>default</code>, <code>default-with-past</code></td> <td><code>AutoModel</code></td></tr> <tr><td><code>masked-lm</code></td> <td><code>AutoModelForMaskedLM</code></td></tr> <tr><td><code>question-answering</code></td> <td><code>AutoModelForQuestionAnswering</code></td></tr> <tr><td><code>seq2seq-lm</code>, <code>seq2seq-lm-with-past</code></td> <td><code>AutoModelForSeq2SeqLM</code></td></tr> <tr><td><code>sequence-classification</code></td> <td><code>AutoModelForSequenceClassification</code></td></tr> <tr><td><code>token-classification</code></td> <td><code>AutoModelForTokenClassification</code></td></tr></tbody></table> <p>For each configuration, you can find the list of supported features via the <code>FeaturesManager</code>. 
For example, for DistilBERT we have:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx.features <span class="hljs-keyword">import</span> FeaturesManager <span class="hljs-meta">&gt;&gt;&gt; </span>distilbert_features = <span class="hljs-built_in">list</span>(FeaturesManager.get_supported_features_for_model_type(<span class="hljs-string">&quot;distilbert&quot;</span>).keys()) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(distilbert_features) [<span class="hljs-string">&quot;default&quot;</span>, <span class="hljs-string">&quot;masked-lm&quot;</span>, <span class="hljs-string">&quot;causal-lm&quot;</span>, <span class="hljs-string">&quot;sequence-classification&quot;</span>, <span class="hljs-string">&quot;token-classification&quot;</span>, <span class="hljs-string">&quot;question-answering&quot;</span>]<!-- HTML_TAG_END --></pre></div> <p>You can then pass one of these features to the <code>--feature</code> argument in the <code>transformers.onnx</code> package. 
For example, to export a text-classification model we can pick a fine-tuned model from the Hub and run:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m transformers.onnx --model=distilbert-base-uncased-finetuned-sst-2-english \ --feature=sequence-classification onnx/<!-- HTML_TAG_END --></pre></div> <p>which will display the following logs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Validating ONNX model... -[✓] ONNX model output names match reference model ({<span class="hljs-string">&#x27;logits&#x27;</span>}) - Validating ONNX Model output <span class="hljs-string">&quot;logits&quot;</span>: -[✓] (2, 2) matches (2, 2) -[✓] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx<!-- HTML_TAG_END --></pre></div> <p>Notice that in this case, the output names from the fine-tuned model are <code>logits</code> instead of the <code>last_hidden_state</code> we saw with the <code>distilbert-base-uncased</code> checkpoint earlier. 
This is expected since the fine-tuned model has a sequence classification head.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>The features that have a <code>with-past</code> suffix (e.g. <code>causal-lm-with-past</code>) correspond to model topologies with precomputed hidden states (key and values in the attention blocks) that can be used for fast autoregressive decoding.</p></div> <h3 class="relative group"><a id="exporting-a-model-for-an-unsupported-architecture" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-a-model-for-an-unsupported-architecture"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting a model for an unsupported architecture </span></h3> <p>If you wish to export a model whose architecture is not natively supported by the library, there are three main steps to follow:</p> <ol><li>Implement a custom ONNX configuration.</li> <li>Export the model to ONNX.</li> <li>Validate the outputs of the PyTorch and exported models.</li></ol> <p>In this section, we’ll look at how DistilBERT was implemented to show what’s involved with each step.</p> <h4 class="relative group"><a id="implementing-a-custom-onnx-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementing-a-custom-onnx-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementing a custom ONNX configuration </span></h4> <p>Let’s start with the ONNX configuration object. 
We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:</p> <ul><li>Encoder-based models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfig">OnnxConfig</a></li> <li>Decoder-based models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfigWithPast">OnnxConfigWithPast</a></li> <li>Encoder-decoder models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxSeq2SeqConfigWithPast">OnnxSeq2SeqConfigWithPast</a></li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>A good way to implement a custom ONNX configuration is to look at the existing implementation in the <code>configuration_&lt;model_name&gt;.py</code> file of a similar architecture.</p></div> <p>Since DistilBERT is an encoder-based model, its configuration inherits from <code>OnnxConfig</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> Mapping, OrderedDict <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> OnnxConfig <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DistilBertOnnxConfig</span>(<span class="hljs-title class_ inherited__">OnnxConfig</span>): <span class="hljs-meta">... </span> @<span class="hljs-built_in">property</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">inputs</span>(<span class="hljs-params">self</span>) -&gt; Mapping[<span class="hljs-built_in">str</span>, Mapping[<span class="hljs-built_in">int</span>, <span class="hljs-built_in">str</span>]]: <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> OrderedDict( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... 
</span> (<span class="hljs-string">&quot;input_ids&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>}), <span class="hljs-meta">... </span> (<span class="hljs-string">&quot;attention_mask&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>}), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> )<!-- HTML_TAG_END --></pre></div> <p>Every configuration object must implement the <code>inputs</code> property and return a mapping, where each key corresponds to an expected input, and each value indicates the axis of that input. For DistilBERT, we can see that two inputs are required: <code>input_ids</code> and <code>attention_mask</code>. These inputs have the same shape of <code>(batch_size, sequence_length)</code> which is why we see the same axes used in the configuration.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Notice that <code>inputs</code> property for <code>DistilBertOnnxConfig</code> returns an <code>OrderedDict</code>. This ensures that the inputs are matched with their relative position within the <code>PreTrainedModel.forward()</code> method when tracing the graph. We recommend using an <code>OrderedDict</code> for the <code>inputs</code> and <code>outputs</code> properties when implementing custom ONNX configurations.</p></div> <p>Once you have implemented an ONNX configuration, you can instantiate it by providing the base model’s configuration as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config = DistilBertOnnxConfig(config)<!-- HTML_TAG_END --></pre></div> <p>The resulting object has several useful 
properties. For example you can view the ONNX operator set that will be used during the export:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config.default_onnx_opset) <span class="hljs-number">11</span><!-- HTML_TAG_END --></pre></div> <p>You can also view the outputs associated with the model as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config.outputs) OrderedDict([(<span class="hljs-string">&quot;last_hidden_state&quot;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&quot;batch&quot;</span>, <span class="hljs-number">1</span>: <span class="hljs-string">&quot;sequence&quot;</span>})])<!-- HTML_TAG_END --></pre></div> <p>Notice that the outputs property follows the same structure as the inputs; it returns an <code>OrderedDict</code> of named outputs and their shapes. The output structure is linked to the choice of feature that the configuration is initialised with. 
By default, the ONNX configuration is initialized with the <code>default</code> feature that corresponds to exporting a model loaded with the <code>AutoModel</code> class. If you want to export a different model topology, just provide a different feature to the <code>task</code> argument when you initialize the ONNX configuration. For example, if we wished to export DistilBERT with a sequence classification head, we could use:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task=<span class="hljs-string">&quot;sequence-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(onnx_config_for_seq_clf.outputs) OrderedDict([(<span class="hljs-string">&#x27;logits&#x27;</span>, {<span class="hljs-number">0</span>: <span class="hljs-string">&#x27;batch&#x27;</span>})])<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>All of the base properties and methods associated with <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfig">OnnxConfig</a> and the other configuration classes can be overriden if needed. 
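<p>For instance, a custom configuration can raise the minimum ONNX opset or loosen the validation tolerance by overriding the corresponding properties. The class below is purely illustrative and the chosen values are assumptions, not recommendations:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; class MyCustomOnnxConfig(OnnxConfig):
...     @property
...     def default_onnx_opset(self) -&gt; int:
...         # hypothetical choice: require a more recent opset than the base default
...         return 13
...
...     @property
...     def atol_for_validation(self) -&gt; float:
...         # hypothetical choice: accept slightly larger numerical differences
...         return 1e-4<!-- HTML_TAG_END --></pre></div>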
Check out <code>BartOnnxConfig</code> for an advanced example.</p></div> <h4 class="relative group"><a id="exporting-the-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-the-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting the model </span></h4> <p>Once you have implemented the ONNX configuration, the next step is to export the model. Here we can use the <code>export()</code> function provided by the <code>transformers.onnx</code> package. This function expects the ONNX configuration, along with the base model and tokenizer, and the path to save the exported file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> pathlib <span class="hljs-keyword">import</span> Path <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> export <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_path = Path(<span class="hljs-string">&quot;model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model_ckpt = <span class="hljs-string">&quot;distilbert-base-uncased&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>base_model = AutoModel.from_pretrained(model_ckpt) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_ckpt) <span 
class="hljs-meta">&gt;&gt;&gt; </span>onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)<!-- HTML_TAG_END --></pre></div> <p>The <code>onnx_inputs</code> and <code>onnx_outputs</code> returned by the <code>export()</code> function are lists of the keys defined in the <code>inputs</code> and <code>outputs</code> properties of the configuration. Once the model is exported, you can test that the model is well formed as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> onnx <span class="hljs-meta">&gt;&gt;&gt; </span>onnx_model = onnx.load(<span class="hljs-string">&quot;model.onnx&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>onnx.checker.check_model(onnx_model)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If your model is larger than 2GB, you will see that many additional files are created during the export. This is <em>expected</em> because ONNX uses <a href="https://developers.google.com/protocol-buffers/" rel="nofollow">Protocol Buffers</a> to store the model and these have a size limit of 2GB. 
See the <a href="https://github.com/onnx/onnx/blob/master/docs/ExternalData.md" rel="nofollow">ONNX documentation</a> for instructions on how to load models with external data.</p></div> <h4 class="relative group"><a id="validating-the-model-outputs" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#validating-the-model-outputs"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Validating the model outputs </span></h4> <p>The final step is to validate that the outputs from the base and exported model agree within some absolute tolerance. Here we can use the <code>validate_model_outputs()</code> function provided by the <code>transformers.onnx</code> package as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.onnx <span class="hljs-keyword">import</span> validate_model_outputs <span class="hljs-meta">&gt;&gt;&gt; </span>validate_model_outputs( <span class="hljs-meta">... </span> onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>This function uses the <code>OnnxConfig.generate_dummy_inputs()</code> method to generate inputs for the base and exported model, and the absolute tolerance can be defined in the configuration. 
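<p>The tolerance used by <code>validate_model_outputs()</code> is also exposed directly on the configuration object, so you can inspect it before running the validation. The value shown below is the default of the base class and may differ for other configurations:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->&gt;&gt;&gt; print(onnx_config.atol_for_validation)
1e-05<!-- HTML_TAG_END --></pre></div>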
We generally find numerical agreement in the 1e-6 to 1e-4 range, although anything smaller than 1e-3 is likely to be OK.</p> <h3 class="relative group"><a id="contributing-a-new-configuration-to-transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#contributing-a-new-configuration-to-transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Contributing a new configuration to 🤗 Transformers </span></h3> <p>We are looking to expand the set of ready-made configurations and welcome contributions from the community! If you would like to contribute your addition to the library, you will need to:</p> <ul><li>Implement the ONNX configuration in the corresponding <code>configuration_&lt;model_name&gt;.py</code> file</li> <li>Include the model architecture and corresponding features in <code>FeatureManager</code></li> <li>Add your model architecture to the tests in <code>test_onnx_v2.py</code></li></ul> <p>Check out how the configuration for <a href="https://github.com/huggingface/transformers/pull/14868/files" rel="nofollow">IBERT was contributed</a> to get an idea of what’s involved.</p> <h2 class="relative group"><a id="torchscript" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#torchscript"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TorchScript </span></h2> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with variable-input-size models. 
It is a focus of interest to us and we will deepen our analysis in upcoming releases, with more code examples, a more flexible implementation, and benchmarks comparing python-based codes with compiled TorchScript.</p></div> <p>According to Pytorch’s documentation: “TorchScript is a way to create serializable and optimizable models from PyTorch code”. Pytorch’s two modules <a href="https://pytorch.org/docs/stable/jit.html" rel="nofollow">JIT and TRACE</a> allow the developer to export their model to be re-used in other programs, such as efficiency-oriented C++ programs.</p> <p>We have provided an interface that allows the export of 🤗 Transformers models to TorchScript so that they can be reused in a different environment than a Pytorch-based python program. Here we explain how to export and use our models using TorchScript.</p> <p>Exporting a model requires two things:</p> <ul><li>a forward pass with dummy inputs.</li> <li>model instantiation with the <code>torchscript</code> flag.</li></ul> <p>These necessities imply several things developers should be careful about. These are detailed below.</p> <h3 class="relative group"><a id="implications" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implications"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implications </span></h3> <h3 class="relative group"><a id="torchscript-flag-and-tied-weights" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#torchscript-flag-and-tied-weights"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TorchScript flag and tied weights </span></h3> <p>This flag is necessary because most of the language models in this repository have tied weights between their <code>Embedding</code> layer and their <code>Decoding</code> layer. 
TorchScript does not allow the export of models that have tied weights, so it is necessary to untie and clone the weights beforehand.</p> <p>This implies that models instantiated with the <code>torchscript</code> flag have their <code>Embedding</code> layer and <code>Decoding</code> layer separate, which means that they should not be trained down the line. Training would de-synchronize the two layers, leading to unexpected results.</p> <p>This is not the case for models that do not have a Language Model head, as those do not have tied weights. These models can be safely exported without the <code>torchscript</code> flag.</p> <h3 class="relative group"><a id="dummy-inputs-and-standard-lengths" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#dummy-inputs-and-standard-lengths"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Dummy inputs and standard lengths </span></h3> <p>The dummy inputs are used to run a model forward pass. While the inputs’ values are propagating through the layers, PyTorch keeps track of the different operations executed on each tensor. These recorded operations are then used to create the “trace” of the model.</p> <p>The trace is created relative to the inputs’ dimensions. It is therefore constrained by the dimensions of the dummy input, and will not work for any other sequence length or batch size. When trying with a different size, an error such as:</p> <p><code>The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2</code></p> <p>will be raised. It is therefore recommended to trace the model with a dummy input size at least as large as the largest input that will be fed to the model during inference. Padding can be performed to fill the missing values, as in the sketch below.
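<p>For example, the tokenizer can pad (and truncate) every incoming input to the same fixed length that was used for the dummy inputs. This is only a sketch of the idea; the checkpoint name and the maximum length of 128 are illustrative:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(&quot;bert-base-uncased&quot;)

# Pad and truncate every input to the same length as the dummy input used for tracing
encoded = tokenizer(
    &quot;Who was Jim Henson?&quot;,
    padding=&quot;max_length&quot;,
    truncation=True,
    max_length=128,
    return_tensors=&quot;pt&quot;,
)
# encoded[&quot;input_ids&quot;] always has shape (1, 128), matching the traced dimensions<!-- HTML_TAG_END --></pre></div>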
However, as the model will have been traced with a large input size, the dimensions of the different matrices will be large as well, resulting in more calculations.</p> <p>It is recommended to be careful of the total number of operations done on each input and to follow performance closely when exporting models with varying sequence lengths.</p> <h3 class="relative group"><a id="using-torchscript-in-python" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-torchscript-in-python"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using TorchScript in Python </span></h3> <p>Below is an example showing how to save and load models, as well as how to use the traced model for inference.</p> <h4 class="relative group"><a id="saving-a-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#saving-a-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Saving a model </span></h4> <p>This snippet shows how to use TorchScript to export a <code>BertModel</code>.
Here the <code>BertModel</code> is instantiated according to a <code>BertConfig</code> class and then saved to disk under the filename <code>traced_bert.pt</code></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig <span class="hljs-keyword">import</span> torch enc = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Tokenizing input text</span> text = <span class="hljs-string">&quot;[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]&quot;</span> tokenized_text = enc.tokenize(text) <span class="hljs-comment"># Masking one of the input tokens</span> masked_index = <span class="hljs-number">8</span> tokenized_text[masked_index] = <span class="hljs-string">&quot;[MASK]&quot;</span> indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) segments_ids = [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>] <span class="hljs-comment"># Creating a dummy input</span> tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) dummy_input = [tokens_tensor, segments_tensors] <span class="hljs-comment"># Initializing the model with the torchscript flag</span> <span class="hljs-comment"># Flag set to True even though it is not necessary as this model does not have an LM Head.</span> config = BertConfig( vocab_size_or_config_json_file=<span class="hljs-number">32000</span>, hidden_size=<span class="hljs-number">768</span>, num_hidden_layers=<span class="hljs-number">12</span>, num_attention_heads=<span class="hljs-number">12</span>, intermediate_size=<span class="hljs-number">3072</span>, torchscript=<span class="hljs-literal">True</span>, ) <span class="hljs-comment"># Instantiating the model</span> model = BertModel(config) <span class="hljs-comment"># The model needs to be in 
evaluation mode</span> model.<span class="hljs-built_in">eval</span>() <span class="hljs-comment"># If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag</span> model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, torchscript=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Creating the trace</span> traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) torch.jit.save(traced_model, <span class="hljs-string">&quot;traced_bert.pt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="loading-a-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#loading-a-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Loading a model </span></h4> <p>This snippet shows how to load the <code>BertModel</code> that was previously saved to disk under the name <code>traced_bert.pt</code>. 
We are re-using the previously initialised <code>dummy_input</code>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->loaded_model = torch.jit.load(<span class="hljs-string">&quot;traced_bert.pt&quot;</span>) loaded_model.<span class="hljs-built_in">eval</span>() all_encoder_layers, pooled_output = loaded_model(*dummy_input)<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="using-a-traced-model-for-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-a-traced-model-for-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using a traced model for inference </span></h4> <p>Using the traced model for inference is as simple as using its <code>__call__</code> dunder method:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white 
py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->traced_model(tokens_tensor, segments_tensors)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deploying-huggingface-torchscript-models-on-aws-using-the-neuron-sdk"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deploying HuggingFace TorchScript models on AWS using the Neuron SDK </span></h3> <p>AWS introduced the <a href="https://aws.amazon.com/ec2/instance-types/inf1/" rel="nofollow">Amazon EC2 Inf1</a> instance family for low cost, high performance machine learning inference in the cloud. The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator, specializing in deep learning inferencing workloads. <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#" rel="nofollow">AWS Neuron</a> is the SDK for Inferentia that supports tracing and optimizing transformers models for deployment on Inf1. 
The Neuron SDK provides:</p> <ol><li>Easy-to-use API with one line of code change to trace and optimize a TorchScript model for inference in the cloud.</li> <li>Out of the box performance optimizations for <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/%3E" rel="nofollow">improved cost-performance</a></li> <li>Support for HuggingFace transformers models built with either <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html" rel="nofollow">PyTorch</a> or <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html" rel="nofollow">TensorFlow</a>.</li></ol> <h4 class="relative group"><a id="implications" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implications"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implications </span></h4> <p>Transformers Models based on the <a href="https://huggingface.co/docs/transformers/master/model_doc/bert" rel="nofollow">BERT (Bidirectional Encoder Representations from Transformers)</a> architecture, or its variants such as <a href="https://huggingface.co/docs/transformers/master/model_doc/distilbert" rel="nofollow">distilBERT</a> and <a href="https://huggingface.co/docs/transformers/master/model_doc/roberta" rel="nofollow">roBERTa</a> will run best on Inf1 for non-generative tasks such as Extractive Question Answering, Sequence Classification, Token Classification. Alternatively, text generation tasks can be adapted to run on Inf1, according to this <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html" rel="nofollow">AWS Neuron MarianMT tutorial</a>. 
More information about models that can be converted out of the box on Inferentia can be found in the <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia" rel="nofollow">Model Architecture Fit section of the Neuron documentation</a>.</p> <h4 class="relative group"><a id="dependencies" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#dependencies"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Dependencies </span></h4> <p>Using AWS Neuron to convert models requires the following dependencies and environment:</p> <ul><li>A <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide" rel="nofollow">Neuron SDK environment</a>, which comes pre-configured on <a href="https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html" rel="nofollow">AWS Deep Learning AMI</a>.</li></ul> <h4 class="relative group"><a id="converting-a-model-for-aws-neuron" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#converting-a-model-for-aws-neuron"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Converting a Model for AWS Neuron </span></h4> <p>Using the same script as in <a href="https://huggingface.co/docs/transformers/master/en/serialization#using-torchscript-in-python" rel="nofollow">Using TorchScript in Python</a> to trace a “BertModel”, you import <code>torch.neuron</code> framework extension to access the components of the Neuron SDK through a Python API.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertModel, BertTokenizer, BertConfig <span class="hljs-keyword">import</span> torch <span class="hljs-keyword">import</span> torch.neuron<!-- HTML_TAG_END --></pre></div> <p>And only modify the tracing line of code</p> <p>from:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->torch.jit.trace(model, [tokens_tensor, segments_tensors])<!-- HTML_TAG_END --></pre></div> <p>to:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->torch.neuron.trace(model, [tokens_tensor, segments_tensors])<!-- HTML_TAG_END --></pre></div> <p>This change enables the Neuron SDK to trace the model and optimize it to run on Inf1 instances.</p> <p>To learn more about AWS Neuron SDK features, tools, example tutorials, and the latest updates, please see the <a href="https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html" rel="nofollow">AWS NeuronSDK documentation</a>.</p> <script type="module" data-hydrate="7m2nkx"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="7m2nkx"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/serialization.mdx-809e82fb.js") ], params: {} } }); </script>
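<p>Once compiled with <code>torch.neuron.trace</code>, the model can typically be saved and reloaded like any other TorchScript module so that it can be shipped to an Inf1 instance. The following is a minimal sketch based on the AWS Neuron tutorials; the file name is illustrative and the exact API may vary between Neuron SDK versions:</p> <div class="code-block relative"><pre><!-- HTML_TAG_START -->import torch
import torch.neuron

# Compile the model for Inferentia and persist the artifact
neuron_model = torch.neuron.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(neuron_model, &quot;bert_neuron.pt&quot;)

# Later, for example on the Inf1 instance, reload and run the compiled model
neuron_model = torch.jit.load(&quot;bert_neuron.pt&quot;)
neuron_model(tokens_tensor, segments_tensors)<!-- HTML_TAG_END --></pre></div>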
91
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/troubleshooting.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;troubleshoot&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;firewalled-environments&quot;,&quot;title&quot;:&quot;Firewalled environments&quot;},{&quot;local&quot;:&quot;cuda-out-of-memory&quot;,&quot;title&quot;:&quot;CUDA out of memory&quot;},{&quot;local&quot;:&quot;unable-to-load-a-saved-tensorflow-model&quot;,&quot;title&quot;:&quot;Unable to load a saved TensorFlow model&quot;},{&quot;local&quot;:&quot;importerror&quot;,&quot;title&quot;:&quot;ImportError&quot;},{&quot;local&quot;:&quot;cuda-error-deviceside-assert-triggered&quot;,&quot;title&quot;:&quot;CUDA error: device-side assert triggered&quot;}],&quot;title&quot;:&quot;Troubleshoot&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/troubleshooting.mdx-e0bea728.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="troubleshoot" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#troubleshoot"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Troubleshoot </span></h1> <p>Sometimes errors occur, but we are here to help! This guide covers some of the most common issues we’ve seen and how you can resolve them. However, this guide isn’t meant to be a comprehensive collection of every 🤗 Transformers issue. 
For more help with troubleshooting your issue, try:</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/S2EEG3JIt2A" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <ol><li>Asking for help on the <a href="https://discuss.huggingface.co/" rel="nofollow">forums</a>. There are specific categories you can post your question to, like <a href="https://discuss.huggingface.co/c/beginners/5" rel="nofollow">Beginners</a> or <a href="https://discuss.huggingface.co/c/transformers/9" rel="nofollow">🤗 Transformers</a>. Make sure you write a good descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!</li></ol> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/_PAli-V4wj0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <ol start="2"><li><p>Create an <a href="https://github.com/huggingface/transformers/issues/new/choose" rel="nofollow">Issue</a> on the 🤗 Transformers repository if it is a bug related to the library. Try to include as much information describing the bug as possible to help us better figure out what’s wrong and how we can fix it.</p></li> <li><p>Check the <a href="migration">Migration</a> guide if you use an older version of 🤗 Transformers since some important changes have been introduced between versions.</p></li></ol> <p>For more details about troubleshooting and getting help, take a look at <a href="https://huggingface.co/course/chapter8/1?fw=pt" rel="nofollow">Chapter 8</a> of the Hugging Face course.</p> <h2 class="relative group"><a id="firewalled-environments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#firewalled-environments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Firewalled environments </span></h2> <p>Some GPU instances on cloud and intranet setups are firewalled to external connections, resulting in a connection error. 
When your script attempts to download model weights or datasets, the download will hang and then timeout with the following message:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->ValueError: Connection error, <span class="hljs-built_in">and</span> we cannot <span class="hljs-keyword">find</span> the requested <span class="hljs-keyword">files</span> in the cached path. Please <span class="hljs-keyword">try</span> again <span class="hljs-built_in">or</span> <span class="hljs-keyword">make</span> sure your Internet connection <span class="hljs-keyword">is</span> <span class="hljs-keyword">on</span>.<!-- HTML_TAG_END --></pre></div> <p>In this case, you should try to run 🤗 Transformers on <a href="installation#offline-mode">offline mode</a> to avoid the connection error.</p> <h2 class="relative group"><a id="cuda-out-of-memory" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#cuda-out-of-memory"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CUDA out of memory </span></h2> <p>Training large models with millions of parameters can be challenging without the appropriate hardware. 
A common error you may encounter when the GPU runs out of memory is:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-attribute">CUDA</span> out of memory. Tried to allocate <span class="hljs-number">256</span>.<span class="hljs-number">00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">11</span>.<span class="hljs-number">17</span> GiB total capacity; <span class="hljs-number">9</span>.<span class="hljs-number">70</span> GiB already allocated; <span class="hljs-number">179</span>.<span class="hljs-number">81</span> MiB free; <span class="hljs-number">9</span>.<span class="hljs-number">85</span> GiB reserved in total by PyTorch)<!-- HTML_TAG_END --></pre></div> <p>Here are some potential solutions you can try to lessen memory use:</p> <ul><li>Reduce the <a href="main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size"><code>per_device_train_batch_size</code></a> value in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Try using <a href="main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps"><code>gradient_accumulation_steps</code></a> in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to effectively increase overall batch size.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Refer to the Performance <a href="performance">guide</a> for more details about memory-saving techniques.</p></div> <h2 class="relative group"><a id="unable-to-load-a-saved-tensorflow-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#unable-to-load-a-saved-tensorflow-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Unable to load a saved TensorFlow model </span></h2> <p>TensorFlow’s <a href="https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model" rel="nofollow">model.save</a> method will save the entire model - architecture, weights, training configuration - in a single file. However, when you load the model file again, you may run into an error because 🤗 Transformers may not load all the TensorFlow-related objects in the model file. To avoid issues with saving and loading TensorFlow models, we recommend you:</p> <ul><li>Save the model weights as a <code>h5</code> file extension with <a href="https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model" rel="nofollow"><code>model.save_weights</code></a> and then reload the model with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a>:</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFPreTrainedModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tensorflow <span class="hljs-keyword">import</span> keras <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_weights(<span class="hljs-string">&quot;some_folder/tf_model.h5&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFPreTrainedModel.from_pretrained(<span class="hljs-string">&quot;some_folder&quot;</span>)<!-- HTML_TAG_END --></pre></div> <ul><li>Save the model with <code>save_pretrained</code> and load it again with <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a>:</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFPreTrainedModel <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;path_to/model&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFPreTrainedModel.from_pretrained(<span class="hljs-string">&quot;path_to/model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="importerror" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#importerror"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImportError </span></h2> <p>Another common error you may encounter, especially if it is a newly released model, is <code>ImportError</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->ImportError: cannot <span class="hljs-keyword">import</span> <span class="hljs-type">name</span> <span class="hljs-string">&#x27;ImageGPTFeatureExtractor&#x27;</span> <span class="hljs-keyword">from</span> <span class="hljs-string">&#x27;transformers&#x27;</span> (<span class="hljs-type">unknown</span> <span class="hljs-keyword">location</span>)<!-- HTML_TAG_END --></pre></div> <p>For these error types, check to make sure you have the latest version of 🤗 Transformers installed to access the most recent models:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers --upgrade<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="cuda-error-deviceside-assert-triggered" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#cuda-error-deviceside-assert-triggered"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CUDA error: device-side assert triggered </span></h2> <p>Sometimes you may run into a generic CUDA error about an error in the device code.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->RuntimeError: CUDA <span class="hljs-literal">error</span>: device-<span class="hljs-literal">side</span> <span class="hljs-keyword">assert</span> triggered<!-- HTML_TAG_END --></pre></div> <p>You should try to run the code on a CPU first to get a more descriptive error message. Add the following environment variable to the beginning of your code to switch to a CPU:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> os <span class="hljs-meta">&gt;&gt;&gt; </span>os.environ[<span class="hljs-string">&quot;CUDA_VISIBLE_DEVICES&quot;</span>] = <span class="hljs-string">&quot;&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Another option is to get a better traceback from the GPU. 
Add the following environment variable to the beginning of your code to get the traceback to point to the source of the error:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> os <span class="hljs-meta">&gt;&gt;&gt; </span>os.environ[<span class="hljs-string">&quot;CUDA_LAUNCH_BLOCKING&quot;</span>] = <span class="hljs-string">&quot;1&quot;</span><!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="ikps7h"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="ikps7h"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/troubleshooting.mdx-e0bea728.js") ], params: {} } }); </script>
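<p>As an illustrative sketch (not part of the original troubleshooting page), the two environment variables above can be combined into a single self-contained debugging run. The checkpoint name <code>bert-base-uncased</code> and the <code>AutoTokenizer</code>/<code>AutoModelForSequenceClassification</code> classes are placeholder choices assuming a PyTorch setup; the key point is that both variables are set before any CUDA context is created, so the failing operation raises a readable Python exception instead of a generic device-side assert:</p> <pre><code class="language-python">import os

# Assumption: set these before importing torch/transformers so no CUDA context exists yet.
os.environ["CUDA_VISIBLE_DEVICES"] = ""   # hide the GPUs and fall back to CPU for a descriptive error
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # make kernel launches synchronous if a GPU is used anyway

from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder checkpoint, used only to illustrate the debugging workflow.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

inputs = tokenizer("A short test sentence.", return_tensors="pt")
outputs = model(**inputs)  # on CPU, an out-of-range index or label raises a clear Python exception
</code></pre> <p>Once the CPU run points to the offending operation (for example a label id larger than the model&#39;s number of classes, or a token id outside the vocabulary), remove the two environment variables and return to the GPU.</p>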
92
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/index.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;transformers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;if-you-are-looking-for-custom-support-from-the-hugging-face-team&quot;,&quot;title&quot;:&quot;If you are looking for custom support from the Hugging Face team&quot;},{&quot;local&quot;:&quot;contents&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;supported-models&quot;,&quot;title&quot;:&quot;Supported models&quot;},{&quot;local&quot;:&quot;supported-frameworks&quot;,&quot;title&quot;:&quot;Supported frameworks&quot;}],&quot;title&quot;:&quot;Contents&quot;}],&quot;title&quot;:&quot;🤗 Transformers&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/index.mdx-78213d4b.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>🤗 Transformers </span></h1> <p>State-of-the-art Machine Learning for PyTorch, TensorFlow and JAX.</p> <p>🤗 Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. 
The models can be used across different modalities such as:</p> <ul><li>📝 Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages.</li> <li>🖼️ Images: image classification, object detection, and segmentation.</li> <li>🗣️ Audio: speech recognition and audio classification.</li> <li>🐙 Multimodal: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.</li></ul> <p>Our library supports seamless integration between three of the most popular deep learning libraries: <a href="https://pytorch.org/" rel="nofollow">PyTorch</a>, <a href="https://www.tensorflow.org/" rel="nofollow">TensorFlow</a> and <a href="https://jax.readthedocs.io/en/latest/" rel="nofollow">JAX</a>. Train your model in three lines of code in one framework, and load it for inference with another.</p> <p>Each 🤗 Transformers architecture is defined in a standalone Python module so they can be easily customized for research and experiments.</p> <h2 class="relative group"><a id="if-you-are-looking-for-custom-support-from-the-hugging-face-team" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#if-you-are-looking-for-custom-support-from-the-hugging-face-team"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>If you are looking for custom support from the Hugging Face team </span></h2> <a target="_blank" href="https://huggingface.co/support"><img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/front/thumbnails/support.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);"> </a><br> <h2 class="relative group"><a id="contents" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#contents"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Contents </span></h2> <p>The documentation is organized in five parts:</p> <ul><li><p><strong>GET STARTED</strong> contains a quick tour, the installation instructions and some useful information about our philosophy and a glossary.</p></li> <li><p><strong>USING 🤗 TRANSFORMERS</strong> contains general tutorials on how to use the library.</p></li> <li><p><strong>ADVANCED GUIDES</strong> contains more advanced guides that are more specific to a given script or part of the library.</p></li> <li><p><strong>RESEARCH</strong> focuses on tutorials that have less to do with how to use the library but more about general research in transformers model</p></li> <li><p><strong>API</strong> contains the documentation of each public class and function, grouped in:</p> <ul><li><strong>MAIN CLASSES</strong> for the main classes exposing the important APIs of the library.</li> <li><strong>MODELS</strong> for the classes and functions related to each model implemented in the library.</li> <li><strong>INTERNAL HELPERS</strong> for the classes and functions we use internally.</li></ul></li></ul> <p>The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and conversion utilities for the following models.</p> <h3 class="relative group"><a id="supported-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#supported-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Supported models </span></h3> <ol><li><strong><a href="model_doc/albert">ALBERT</a></strong> (from Google Research and the Toyota Technological Institute at Chicago) released with the paper <a href="https://arxiv.org/abs/1909.11942" rel="nofollow">ALBERT: A Lite BERT for Self-supervised Learning of Language Representations</a>, by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.</li> <li><strong><a href="model_doc/bart">BART</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension</a> by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.</li> <li><strong><a href="model_doc/barthez">BARThez</a></strong> (from École polytechnique) released with the paper <a href="https://arxiv.org/abs/2010.12321" rel="nofollow">BARThez: a Skilled Pretrained French Sequence-to-Sequence Model</a> by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis.</li> <li><strong><a href="model_doc/bartpho">BARTpho</a></strong> (from VinAI Research) released with the paper <a href="https://arxiv.org/abs/2109.09701" rel="nofollow">BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese</a> by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.</li> <li><strong><a href="model_doc/beit">BEiT</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2106.08254" rel="nofollow">BEiT: BERT Pre-Training of Image Transformers</a> by Hangbo Bao, Li Dong, Furu Wei.</li> <li><strong><a href="model_doc/bert">BERT</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1810.04805" rel="nofollow">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a> by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.</li> <li><strong><a href="model_doc/bertweet">BERTweet</a></strong> (from VinAI Research) released with the paper <a href="https://aclanthology.org/2020.emnlp-demos.2/" rel="nofollow">BERTweet: A pre-trained language model for English Tweets</a> by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/bert-generation">BERT For Sequence Generation</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a href="model_doc/big_bird">BigBird-RoBERTa</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/bigbird_pegasus">BigBird-Pegasus</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/blenderbot">Blenderbot</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/blenderbot-small">BlenderbotSmall</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/bort">BORT</a></strong> (from Alexa) released with the paper <a href="https://arxiv.org/abs/2010.10499" rel="nofollow">Optimal Subarchitecture Extraction For BERT</a> by Adrian de Wynter and Daniel J. 
Perry.</li> <li><strong><a href="model_doc/byt5">ByT5</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.13626" rel="nofollow">ByT5: Towards a token-free future with pre-trained byte-to-byte models</a> by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.</li> <li><strong><a href="model_doc/camembert">CamemBERT</a></strong> (from Inria/Facebook/Sorbonne) released with the paper <a href="https://arxiv.org/abs/1911.03894" rel="nofollow">CamemBERT: a Tasty French Language Model</a> by Louis Martin<em>, Benjamin Muller</em>, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.</li> <li><strong><a href="model_doc/canine">CANINE</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2103.06874" rel="nofollow">CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation</a> by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.</li> <li><strong><a href="model_doc/convnext">ConvNeXT</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2201.03545" rel="nofollow">A ConvNet for the 2020s</a> by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.</li> <li><strong><a href="model_doc/clip">CLIP</a></strong> (from OpenAI) released with the paper <a href="https://arxiv.org/abs/2103.00020" rel="nofollow">Learning Transferable Visual Models From Natural Language Supervision</a> by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.</li> <li><strong><a href="model_doc/convbert">ConvBERT</a></strong> (from YituTech) released with the paper <a href="https://arxiv.org/abs/2008.02496" rel="nofollow">ConvBERT: Improving BERT with Span-based Dynamic Convolution</a> by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.</li> <li><strong><a href="model_doc/cpm">CPM</a></strong> (from Tsinghua University) released with the paper <a href="https://arxiv.org/abs/2012.00413" rel="nofollow">CPM: A Large-scale Generative Chinese Pre-trained Language Model</a> by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.</li> <li><strong><a href="model_doc/ctrl">CTRL</a></strong> (from Salesforce) released with the paper <a href="https://arxiv.org/abs/1909.05858" rel="nofollow">CTRL: A Conditional Transformer Language Model for Controllable Generation</a> by Nitish Shirish Keskar<em>, Bryan McCann</em>, Lav R. 
Varshney, Caiming Xiong and Richard Socher.</li> <li><strong><a href="model_doc/data2vec">Data2Vec</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2202.03555" rel="nofollow">Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.</li> <li><strong><a href="model_doc/deberta">DeBERTa</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/deberta-v2">DeBERTa-v2</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/dit">DiT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2203.02378" rel="nofollow">DiT: Self-supervised Pre-training for Document Image Transformer</a> by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/deit">DeiT</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2012.12877" rel="nofollow">Training data-efficient image transformers &amp; distillation through attention</a> by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.</li> <li><strong><a href="model_doc/detr">DETR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2005.12872" rel="nofollow">End-to-End Object Detection with Transformers</a> by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.</li> <li><strong><a href="model_doc/dialogpt">DialoGPT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/1911.00536" rel="nofollow">DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation</a> by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.</li> <li><strong><a href="model_doc/distilbert">DistilBERT</a></strong> (from HuggingFace), released together with the paper <a href="https://arxiv.org/abs/1910.01108" rel="nofollow">DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter</a> by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into <a href="https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation" rel="nofollow">DistilGPT2</a>, RoBERTa into <a href="https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation" rel="nofollow">DistilRoBERTa</a>, Multilingual BERT into <a href="https://github.com/huggingface/transformers/tree/master/examples/research_projects/distillation" rel="nofollow">DistilmBERT</a> and a German version of DistilBERT.</li> <li><strong><a href="model_doc/dpr">DPR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.04906" rel="nofollow">Dense Passage Retrieval for Open-Domain Question Answering</a> by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.</li> <li><strong><a href="model_doc/encoder-decoder">EncoderDecoder</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a href="model_doc/electra">ELECTRA</a></strong> (from Google Research/Stanford University) released with the paper <a href="https://arxiv.org/abs/2003.10555" rel="nofollow">ELECTRA: Pre-training text encoders as discriminators rather than generators</a> by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.</li> <li><strong><a href="model_doc/flaubert">FlauBERT</a></strong> (from CNRS) released with the paper <a href="https://arxiv.org/abs/1912.05372" rel="nofollow">FlauBERT: Unsupervised Language Model Pre-training for French</a> by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.</li> <li><strong><a href="model_doc/fnet">FNet</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.03824" rel="nofollow">FNet: Mixing Tokens with Fourier Transforms</a> by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.</li> <li><strong><a href="model_doc/funnel">Funnel Transformer</a></strong> (from CMU/Google Brain) released with the paper <a href="https://arxiv.org/abs/2006.03236" rel="nofollow">Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing</a> by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. 
Le.</li> <li><strong><a href="model_doc/openai-gpt">GPT</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/language-unsupervised/" rel="nofollow">Improving Language Understanding by Generative Pre-Training</a> by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.</li> <li><strong><a href="model_doc/gpt2">GPT-2</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/better-language-models/" rel="nofollow">Language Models are Unsupervised Multitask Learners</a> by Alec Radford<em>, Jeffrey Wu</em>, Rewon Child, David Luan, Dario Amodei<strong>and Ilya Sutskever</strong>.</li> <li><strong><a href="model_doc/gptj">GPT-J</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/kingoflolz/mesh-transformer-jax/" rel="nofollow">kingoflolz/mesh-transformer-jax</a> by Ben Wang and Aran Komatsuzaki.</li> <li><strong><a href="model_doc/gpt_neo">GPT Neo</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/EleutherAI/gpt-neo" rel="nofollow">EleutherAI/gpt-neo</a> by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.</li> <li><strong><a href="model_doc/hubert">Hubert</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2106.07447" rel="nofollow">HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units</a> by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.</li> <li><strong><a href="model_doc/ibert">I-BERT</a></strong> (from Berkeley) released with the paper <a href="https://arxiv.org/abs/2101.01321" rel="nofollow">I-BERT: Integer-only BERT Quantization</a> by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.</li> <li><strong><a href="model_doc/imagegpt">ImageGPT</a></strong> (from OpenAI) released with the paper <a href="https://openai.com/blog/image-gpt/" rel="nofollow">Generative Pretraining from Pixels</a> by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.</li> <li><strong><a href="model_doc/layoutlm">LayoutLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/1912.13318" rel="nofollow">LayoutLM: Pre-training of Text and Layout for Document Image Understanding</a> by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.</li> <li><strong><a href="model_doc/layoutlmv2">LayoutLMv2</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2012.14740" rel="nofollow">LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.</li> <li><strong><a href="model_doc/layoutlmv2">LayoutXLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2104.08836" rel="nofollow">LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/led">LED</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. 
Peters, Arman Cohan.</li> <li><strong><a href="model_doc/longformer">Longformer</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. Peters, Arman Cohan.</li> <li><strong><a href="model_doc/luke">LUKE</a></strong> (from Studio Ousia) released with the paper <a href="https://arxiv.org/abs/2010.01057" rel="nofollow">LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention</a> by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.</li> <li><strong><a href="model_doc/mluke">mLUKE</a></strong> (from Studio Ousia) released with the paper <a href="https://arxiv.org/abs/2110.08151" rel="nofollow">mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models</a> by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.</li> <li><strong><a href="model_doc/lxmert">LXMERT</a></strong> (from UNC Chapel Hill) released with the paper <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering</a> by Hao Tan and Mohit Bansal.</li> <li><strong><a href="model_doc/m2m_100">M2M100</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2010.11125" rel="nofollow">Beyond English-Centric Multilingual Machine Translation</a> by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.</li> <li><strong><a href="model_doc/marian">MarianMT</a></strong> Machine translation models trained using <a href="http://opus.nlpl.eu/" rel="nofollow">OPUS</a> data by Jörg Tiedemann. The <a href="https://marian-nmt.github.io/" rel="nofollow">Marian Framework</a> is being developed by the Microsoft Translator Team.</li> <li><strong><a href="model_doc/maskformer">MaskFormer</a></strong> (from Meta and UIUC) released with the paper <a href="https://arxiv.org/abs/2107.06278" rel="nofollow">Per-Pixel Classification is Not All You Need for Semantic Segmentation</a> by Bowen Cheng, Alexander G. 
Schwing, Alexander Kirillov.</li> <li><strong><a href="model_doc/mbart">MBart</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2001.08210" rel="nofollow">Multilingual Denoising Pre-training for Neural Machine Translation</a> by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.</li> <li><strong><a href="model_doc/mbart">MBart-50</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2008.00401" rel="nofollow">Multilingual Translation with Extensible Multilingual Pretraining and Finetuning</a> by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.</li> <li><strong><a href="model_doc/megatron-bert">Megatron-BERT</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/megatron_gpt2">Megatron-GPT2</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/mpnet">MPNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2004.09297" rel="nofollow">MPNet: Masked and Permuted Pre-training for Language Understanding</a> by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.</li> <li><strong><a href="model_doc/mt5">MT5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11934" rel="nofollow">mT5: A massively multilingual pre-trained text-to-text transformer</a> by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.</li> <li><strong><a href="model_doc/nystromformer">Nyströmformer</a></strong> (from the University of Wisconsin - Madison) released with the paper <a href="https://arxiv.org/abs/2102.03902" rel="nofollow">Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention</a> by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.</li> <li><strong><a href="model_doc/pegasus">Pegasus</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1912.08777" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a> by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.</li> <li><strong><a href="model_doc/perceiver">Perceiver IO</a></strong> (from Deepmind) released with the paper <a href="https://arxiv.org/abs/2107.14795" rel="nofollow">Perceiver IO: A General Architecture for Structured Inputs &amp; Outputs</a> by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. 
Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.</li> <li><strong><a href="model_doc/phobert">PhoBERT</a></strong> (from VinAI Research) released with the paper <a href="https://www.aclweb.org/anthology/2020.findings-emnlp.92/" rel="nofollow">PhoBERT: Pre-trained language models for Vietnamese</a> by Dat Quoc Nguyen and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/plbart">PLBart</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/abs/2103.06333" rel="nofollow">Unified Pre-training for Program Understanding and Generation</a> by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.</li> <li><strong><a href="model_doc/poolformer">PoolFormer</a></strong> (from Sea AI Labs) released with the paper <a href="https://arxiv.org/abs/2111.11418" rel="nofollow">MetaFormer is Actually What You Need for Vision</a> by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.</li> <li><strong><a href="model_doc/prophetnet">ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/qdqbert">QDQBert</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2004.09602" rel="nofollow">Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation</a> by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.</li> <li><strong><a href="https://huggingface.co/transformers/model_doc/realm.html" rel="nofollow">REALM</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2002.08909" rel="nofollow">REALM: Retrieval-Augmented Language Model Pre-Training</a> by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.</li> <li><strong><a href="model_doc/reformer">Reformer</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2001.04451" rel="nofollow">Reformer: The Efficient Transformer</a> by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.</li> <li><strong><a href="model_doc/rembert">RemBERT</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2010.12821" rel="nofollow">Rethinking embedding coupling in pre-trained language models</a> by Hyung Won Chung, Thibault Févry, Henry Tsai, M. 
Johnson, Sebastian Ruder.</li> <li><strong><a href="model_doc/roberta">RoBERTa</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/1907.11692" rel="nofollow">RoBERTa: A Robustly Optimized BERT Pretraining Approach</a> by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.</li> <li><strong><a href="model_doc/roformer">RoFormer</a></strong> (from ZhuiyiTechnology), released together with the paper <a href="https://arxiv.org/abs/2104.09864" rel="nofollow">RoFormer: Enhanced Transformer with Rotary Position Embedding</a> by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.</li> <li><strong><a href="model_doc/segformer">SegFormer</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2105.15203" rel="nofollow">SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers</a> by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.</li> <li><strong><a href="model_doc/sew">SEW</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/sew_d">SEW-D</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/speech_to_text">SpeechToTextTransformer</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2010.05171" rel="nofollow">fairseq S2T: Fast Speech-to-Text Modeling with fairseq</a> by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.</li> <li><strong><a href="model_doc/speech_to_text_2">SpeechToTextTransformer2</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale Self- and Semi-Supervised Learning for Speech Translation</a> by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.</li> <li><strong><a href="model_doc/splinter">Splinter</a></strong> (from Tel Aviv University), released together with the paper <a href="https://arxiv.org/abs/2101.00438" rel="nofollow">Few-Shot Question Answering by Pretraining Span Selection</a> by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.</li> <li><strong><a href="model_doc/squeezebert">SqueezeBert</a></strong> (from Berkeley) released with the paper <a href="https://arxiv.org/abs/2006.11316" rel="nofollow">SqueezeBERT: What can computer vision teach NLP about efficient neural networks?</a> by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer.</li> <li><strong><a href="model_doc/swin">Swin Transformer</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2103.14030" rel="nofollow">Swin Transformer: Hierarchical Vision Transformer using Shifted Windows</a> by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.</li> <li><strong><a href="model_doc/t5">T5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.</li> <li><strong><a href="model_doc/t5v1.1">T5v1.1</a></strong> (from Google AI) released in the repository <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511" rel="nofollow">google-research/text-to-text-transfer-transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.</li> <li><strong><a href="model_doc/tapas">TAPAS</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2004.02349" rel="nofollow">TAPAS: Weakly Supervised Table Parsing via Pre-training</a> by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.</li> <li><strong><a href="model_doc/transfo-xl">Transformer-XL</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1901.02860" rel="nofollow">Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context</a> by Zihang Dai<em>, Zhilin Yang</em>, Yiming Yang, Jaime Carbonell, Quoc V. 
Le, Ruslan Salakhutdinov.</li> <li><strong><a href="model_doc/trocr">TrOCR</a></strong> (from Microsoft), released together with the paper <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.</li> <li><strong><a href="model_doc/unispeech">UniSpeech</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2101.07597" rel="nofollow">UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data</a> by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.</li> <li><strong><a href="model_doc/unispeech-sat">UniSpeechSat</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2110.05752" rel="nofollow">UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING</a> by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.</li> <li><strong><a href="model_doc/vilt">ViLT</a></strong> (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper <a href="https://arxiv.org/abs/2102.03334" rel="nofollow">ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> by Wonjae Kim, Bokyung Son, Ildoo Kim.</li> <li><strong><a href="model_doc/vit">Vision Transformer (ViT)</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11929" rel="nofollow">An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale</a> by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.</li> <li><strong><a href="model_doc/vit_mae">ViTMAE</a></strong> (from Meta AI) released with the paper <a href="https://arxiv.org/abs/2111.06377" rel="nofollow">Masked Autoencoders Are Scalable Vision Learners</a> by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.</li> <li><strong><a href="model_doc/visual_bert">VisualBERT</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/pdf/1908.03557" rel="nofollow">VisualBERT: A Simple and Performant Baseline for Vision and Language</a> by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.</li> <li><strong><a href="model_doc/wavlm">WavLM</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2110.13900" rel="nofollow">WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing</a> by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.</li> <li><strong><a href="model_doc/wav2vec2">Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.11477" rel="nofollow">wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations</a> by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="https://huggingface.co/docs/master/transformers/model_doc/wav2vec2_phoneme" rel="nofollow">Wav2Vec2Phoneme</a></strong> (from Facebook AI) released with the paper <a 
href="https://arxiv.org/abs/2109.11680" rel="nofollow">Simple and Effective Zero-shot Cross-lingual Phoneme Recognition</a> by Qiantong Xu, Alexei Baevski, Michael Auli.</li> <li><strong><a href="https://huggingface.co/docs/master/transformers/model_doc/xglm" rel="nofollow">XGLM</a></strong> (From Facebook AI) released with the paper <a href="https://arxiv.org/abs/2112.10668" rel="nofollow">Few-shot Learning with Multilingual Language Models</a> by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O’Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.</li> <li><strong><a href="model_doc/xlm">XLM</a></strong> (from Facebook) released together with the paper <a href="https://arxiv.org/abs/1901.07291" rel="nofollow">Cross-lingual Language Model Pretraining</a> by Guillaume Lample and Alexis Conneau.</li> <li><strong><a href="model_doc/xlm-prophetnet">XLM-ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/xlm-roberta">XLM-RoBERTa</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/1911.02116" rel="nofollow">Unsupervised Cross-lingual Representation Learning at Scale</a> by Alexis Conneau<em>, Kartikay Khandelwal</em>, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.</li> <li><strong><a href="model_doc/xlm-roberta-xl">XLM-RoBERTa-XL</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/2105.00572" rel="nofollow">Larger-Scale Transformers for Multilingual Masked Language Modeling</a> by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.</li> <li><strong><a href="model_doc/xlnet">XLNet</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1906.08237" rel="nofollow">​XLNet: Generalized Autoregressive Pretraining for Language Understanding</a> by Zhilin Yang<em>, Zihang Dai</em>, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. 
Le.</li> <li><strong><a href="model_doc/xlsr_wav2vec2">XLSR-Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.13979" rel="nofollow">Unsupervised Cross-Lingual Representation Learning For Speech Recognition</a> by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="https://huggingface.co/docs/master/transformers/model_doc/xls_r" rel="nofollow">XLS-R</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2111.09296" rel="nofollow">XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale</a> by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.</li> <li><strong><a href="model_doc/yoso">YOSO</a></strong> (from the University of Wisconsin - Madison) released with the paper <a href="https://arxiv.org/abs/2111.09714" rel="nofollow">You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling</a> by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.</li></ol> <h3 class="relative group"><a id="supported-frameworks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#supported-frameworks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Supported frameworks </span></h3> <p>The table below represents the current support in the library for each of those models, whether they have a Python tokenizer (called “slow”). 
It also shows whether they have a “fast” tokenizer backed by the 🤗 Tokenizers library, and whether they have support in Jax (via Flax), PyTorch, and/or TensorFlow.</p> <table><thead><tr><th align="center">Model</th> <th align="center">Tokenizer slow</th> <th align="center">Tokenizer fast</th> <th align="center">PyTorch support</th> <th align="center">TensorFlow support</th> <th align="center">Flax Support</th></tr></thead> <tbody><tr><td align="center">ALBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BEiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Bert Generation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">BigBird</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BigBirdPegasus</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Blenderbot</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BlenderbotSmall</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">CamemBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Canine</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">CLIP</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">ConvBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ConvNext</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">CTRL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecAudio</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecText</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa-v2</td> <td 
align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DeiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DETR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DistilBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">DPR</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ELECTRA</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">FairSeq Machine-Translation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">FlauBERT</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">FNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Funnel Transformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">GPT Neo</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">GPT-J</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">Hubert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">I-BERT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ImageGPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLMv2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LED</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Longformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LUKE</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td 
align="center">❌</td></tr> <tr><td align="center">LXMERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">M2M100</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Marian</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MaskFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">mBART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MegatronBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">MobileBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">MPNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">mT5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Nystromformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT-2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Pegasus</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Perceiver</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PLBart</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PoolFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">QDQBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RAG</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Realm</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Reformer</td> <td align="center">✅</td> <td align="center">✅</td> <td 
align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RemBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">RetriBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">RoFormer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">SegFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SEW</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SEW-D</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Speech Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">Speech2Text</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Speech2Text2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Splinter</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SqueezeBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Swin</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">T5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">TAPAS</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Transformer-XL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">TrOCR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeech</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeechSat</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViLT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td 
align="center">Vision Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">VisionTextDualEncoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">VisualBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">ViTMAE</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Wav2Vec2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">WavLM</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XGLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">XLM</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">XLM-RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">XLM-RoBERTa-XL</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XLMProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XLNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">YOSO</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr></tbody></table> <script type="module" data-hydrate="w248uv"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="w248uv"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/index.mdx-78213d4b.js") ], params: {} } }); </script>
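<p>As an illustrative aside (not part of the table itself), the framework columns translate directly into which model classes can be instantiated. For example, DistilBERT is listed with both tokenizers and with PyTorch, TensorFlow and Flax support, so the same checkpoint can be loaded with each framework’s Auto class, assuming all three frameworks are installed; the checkpoint name below is only an example:</p> <pre>from transformers import AutoTokenizer, AutoModel, TFAutoModel, FlaxAutoModel

checkpoint = "distilbert-base-uncased"  # example checkpoint; any DistilBERT model on the Hub works

tokenizer = AutoTokenizer.from_pretrained(checkpoint)   # returns the "fast" tokenizer when one is available
pt_model = AutoModel.from_pretrained(checkpoint)        # PyTorch
tf_model = TFAutoModel.from_pretrained(checkpoint)      # TensorFlow
flax_model = FlaxAutoModel.from_pretrained(checkpoint)  # Flax (add from_pt=True if only PyTorch weights are hosted)</pre>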
93
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/sagemaker.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;run-training-on-amazon-sagemaker&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;table-of-content&quot;,&quot;title&quot;:&quot;Table of Content&quot;}],&quot;title&quot;:&quot;Run training on Amazon SageMaker&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/sagemaker.mdx-d221e67e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="run-training-on-amazon-sagemaker" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#run-training-on-amazon-sagemaker"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Run training on Amazon SageMaker </span></h1> <p>The documentation has been moved to <a href="https://huggingface.co/docs/sagemaker" rel="nofollow">hf.co/docs/sagemaker</a>. This page will be removed in <code>transformers</code> 5.0. 
</p> <h3 class="relative group"><a id="table-of-content" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#table-of-content"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Table of Content </span></h3> <ul><li><a href="https://huggingface.co/docs/sagemaker/train" rel="nofollow">Train Hugging Face models on Amazon SageMaker with the SageMaker Python SDK</a></li> <li><a href="https://huggingface.co/docs/sagemaker/inference" rel="nofollow">Deploy Hugging Face models to Amazon SageMaker with the SageMaker Python SDK</a></li> <li><a href="https://huggingface.co/docs/sagemaker/faq" rel="nofollow">Frequently Asked Questions</a></li></ul> <script type="module" data-hydrate="tphvsy"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="tphvsy"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/sagemaker.mdx-d221e67e.js") ], params: {} } }); </script>
94
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/model_sharing.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;share-a-model&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;repository-features&quot;,&quot;title&quot;:&quot;Repository features&quot;},{&quot;local&quot;:&quot;setup&quot;,&quot;title&quot;:&quot;Setup&quot;},{&quot;local&quot;:&quot;convert-a-model-for-all-frameworks&quot;,&quot;title&quot;:&quot;Convert a model for all frameworks&quot;},{&quot;local&quot;:&quot;push-a-model-with-trainer&quot;,&quot;title&quot;:&quot;Push a model with `Trainer`&quot;},{&quot;local&quot;:&quot;push-a-model-with-pushtohubcallback&quot;,&quot;title&quot;:&quot;Push a model with `PushToHubCallback`&quot;},{&quot;local&quot;:&quot;use-the-pushtohub-function&quot;,&quot;title&quot;:&quot;Use the `push_to_hub` function&quot;},{&quot;local&quot;:&quot;upload-with-the-web-interface&quot;,&quot;title&quot;:&quot;Upload with the web interface&quot;},{&quot;local&quot;:&quot;add-a-model-card&quot;,&quot;title&quot;:&quot;Add a model card&quot;}],&quot;title&quot;:&quot;Share a model&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/model_sharing.mdx-3b3df35f.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="share-a-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#share-a-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Share a model </span></h1> <p>The last two tutorials showed how you can fine-tune a model with PyTorch, Keras, and 🤗 Accelerate for distributed setups. The next step is to share your model with the community! 
At Hugging Face, we believe in openly sharing knowledge and resources to democratize artificial intelligence for everyone. We encourage you to consider sharing your model with the community to help others save time and resources.</p> <p>In this tutorial, you will learn two methods for sharing a trained or fine-tuned model on the <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>:</p> <ul><li>Programmatically push your files to the Hub.</li> <li>Drag-and-drop your files to the Hub with the web interface.</li></ul> <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>To share a model with the community, you need an account on <a href="https://huggingface.co/join" rel="nofollow">huggingface.co</a>. You can also join an existing organization or create a new one.</p></div> <h2 class="relative group"><a id="repository-features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#repository-features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Repository features </span></h2> <p>Each repository on the Model Hub behaves like a typical GitHub repository. Our repositories offer versioning, commit history, and the ability to visualize differences.</p> <p>The Model Hub’s built-in versioning is based on git and <a href="https://git-lfs.github.com/" rel="nofollow">git-lfs</a>. In other words, you can treat one model as one repository, enabling greater access control and scalability. 
Version control allows <em>revisions</em>, a method for pinning a specific version of a model with a commit hash, tag or branch.</p> <p>As a result, you can load a specific model version with the <code>revision</code> parameter:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;julien-c/EsperBERTo-small&quot;</span>, revision=<span class="hljs-string">&quot;v2.0.1&quot;</span> <span class="hljs-comment"># tag name, or branch name, or commit hash</span> <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Files are also easily edited in a repository, and you can view the commit history as well as the difference:</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png" alt="vis_diff"></p> <h2 class="relative group"><a id="setup" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#setup"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Setup </span></h2> <p>Before sharing a model to the Hub, you will need your Hugging Face credentials. If you have access to a terminal, run the following command in the virtual environment where 🤗 Transformers is installed. 
This will store your access token in your Hugging Face cache folder (<code>~/.cache/</code> by default):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->huggingface-cli login<!-- HTML_TAG_END --></pre></div> <p>If you are using a notebook like Jupyter or Colaboratory, make sure you have the <a href="https://huggingface.co/docs/hub/adding-a-library" rel="nofollow"><code>huggingface_hub</code></a> library installed. This library allows you to programmatically interact with the Hub.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install huggingface_hub<!-- HTML_TAG_END --></pre></div> <p>Then use <code>notebook_login</code> to sign-in to the Hub, and follow the link <a href="https://huggingface.co/settings/token" rel="nofollow">here</a> to generate a token to login with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> notebook_login <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_login()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="convert-a-model-for-all-frameworks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#convert-a-model-for-all-frameworks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Convert a model for all frameworks </span></h2> <p>To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users are still able to load your model from a different framework if you skip this step, it will be slower because 🤗 Transformers will need to convert the checkpoint on-the-fly.</p> <p>Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see <a href="installation">here</a> for installation instructions), and then find the specific model for your task in the other framework. </p> <p>For example, suppose you trained DistilBert for sequence classification in PyTorch and want to convert it to its TensorFlow equivalent. 
Load the TensorFlow equivalent of your model for your task, and specify <code>from_pt=True</code> so 🤗 Transformers will convert the PyTorch checkpoint to a TensorFlow checkpoint:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Then save your new TensorFlow model with its new checkpoint:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Similarly, specify <code>from_tf=True</code> to convert a checkpoint from TensorFlow to PyTorch:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" 
fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_tf=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>If a model is available in Flax, you can also convert a checkpoint from PyTorch to Flax:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span> <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="push-a-model-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#push-a-model-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Push a model with <code>Trainer</code></span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/Z1-XMy-GNLQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Sharing a model to the Hub is as simple as adding an extra parameter or callback. Remember from the <a href="training">fine-tuning tutorial</a>, the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> class is where you specify hyperparameters and additional training options. One of these training options includes the ability to push a model directly to the Hub. 
Set <code>push_to_hub=True</code> in your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;my-awesome-model&quot;</span>, push_to_hub=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Pass your training arguments as usual to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>After you fine-tune your model, call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.push_to_hub">push_to_hub()</a> on <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> to push the trained model to the Hub. 🤗 Transformers will even automatically add training hyperparameters, training results and framework versions to your model card!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer.push_to_hub()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="push-a-model-with-pushtohubcallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#push-a-model-with-pushtohubcallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Push a model with <code>PushToHubCallback</code></span></h2> <p>TensorFlow users can enable the same functionality with <a href="/docs/transformers/pr_16143/en/main_classes/keras_callbacks#transformers.PushToHubCallback">PushToHubCallback</a>. 
In the <a href="/docs/transformers/pr_16143/en/main_classes/keras_callbacks#transformers.PushToHubCallback">PushToHubCallback</a> function, add:</p> <ul><li>An output directory for your model.</li> <li>A tokenizer.</li> <li>The <code>hub_model_id</code>, which is your Hub username and model name.</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.keras.callbacks <span class="hljs-keyword">import</span> PushToHubCallback <span class="hljs-meta">&gt;&gt;&gt; </span>push_to_hub_callback = PushToHubCallback( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;your-username/my-awesome-model&quot;</span> <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Add the callback to <a href="https://keras.io/api/models/model_training_apis/" rel="nofollow"><code>fit</code></a>, and 🤗 Transformers will push the trained model to the Hub:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>, callbacks=push_to_hub_callback)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="use-the-pushtohub-function" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#use-the-pushtohub-function"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Use the <code>push_to_hub</code> function </span></h2> <p>You can also call <code>push_to_hub</code> directly on your model to upload it to the Hub.</p> <p>Specify your model name in <code>push_to_hub</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>This creates a repository under your username with the model name <code>my-awesome-model</code>. Users can now load your model with the <code>from_pretrained</code> function:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;your_username/my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>If you belong to an organization and want to push your model under the organization name instead, add the <code>organization</code> parameter:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div 
class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>, organization=<span class="hljs-string">&quot;my-awesome-org&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The <code>push_to_hub</code> function can also be used to add other files to a model repository. For example, add a tokenizer to a model repository:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Or perhaps you’d like to add the TensorFlow version of your fine-tuned PyTorch model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Now when you navigate to the your Hugging Face profile, you should see your newly created model repository. 
Clicking on the <strong>Files</strong> tab will display all the files you’ve uploaded to the repository.</p> <p>For more details on how to create and upload files to a repository, refer to the Hub documentation <a href="https://huggingface.co/docs/hub/how-to-upstream" rel="nofollow">here</a>.</p> <h2 class="relative group"><a id="upload-with-the-web-interface" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#upload-with-the-web-interface"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Upload with the web interface </span></h2> <p>Users who prefer a no-code approach are able to upload a model through the Hub’s web interface. Visit <a href="https://huggingface.co/new" rel="nofollow">huggingface.co/new</a> to create a new repository:</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png" alt="new_model_repo"></p> <p>From here, add some information about your model:</p> <ul><li>Select the <strong>owner</strong> of the repository. This can be yourself or any of the organizations you belong to.</li> <li>Pick a name for your model, which will also be the repository name.</li> <li>Choose whether your model is public or private.</li> <li>Specify the license usage for your model.</li></ul> <p>Now click on the <strong>Files</strong> tab and click on the <strong>Add file</strong> button to upload a new file to your repository. 
Then drag-and-drop a file to upload and add a commit message.</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png" alt="upload_file"></p> <h2 class="relative group"><a id="add-a-model-card" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#add-a-model-card"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Add a model card </span></h2> <p>To make sure users understand your model’s capabilities, limitations, potential biases and ethical considerations, please add a model card to your repository. The model card is defined in the <code>README.md</code> file. You can add a model card by:</p> <ul><li>Manually creating and uploading a <code>README.md</code> file.</li> <li>Clicking on the <strong>Edit model card</strong> button in your model repository.</li></ul> <p>Take a look at the DistilBert <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">model card</a> for a good example of the type of information a model card should include. For more details about other options you can control in the <code>README.md</code> file such as a model’s carbon footprint or widget examples, refer to the documentation <a href="https://huggingface.co/docs/hub/model-repos" rel="nofollow">here</a>.</p> <script type="module" data-hydrate="19obb8"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="19obb8"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/model_sharing.mdx-3b3df35f.js") ], params: {} } }); </script>
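<p>For reference, a model card is just a <code>README.md</code> with optional YAML metadata at the top. The snippet below is a hypothetical, minimal sketch; the field values are placeholders rather than requirements:</p> <pre>
---
license: apache-2.0
language: en
tags:
- text-classification
---

# my-awesome-model

Describe the base checkpoint, the data the model was fine-tuned on, and how to use it.

## Intended uses and limitations

Describe known limitations, failure modes and potential biases here.
</pre>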
95
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/bertology.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;bertology&quot;,&quot;title&quot;:&quot;BERTology&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/bertology.mdx-1663513a.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="bertology" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bertology"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BERTology </span></h1> <p>There is a growing field of study concerned with investigating the inner working of large-scale transformers like BERT (that some call “BERTology”). Some good examples of this field are:</p> <ul><li>BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: <a href="https://arxiv.org/abs/1905.05950" rel="nofollow">https://arxiv.org/abs/1905.05950</a></li> <li>Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: <a href="https://arxiv.org/abs/1905.10650" rel="nofollow">https://arxiv.org/abs/1905.10650</a></li> <li>What Does BERT Look At? An Analysis of BERT’s Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. 
Manning: <a href="https://arxiv.org/abs/1906.04341" rel="nofollow">https://arxiv.org/abs/1906.04341</a></li></ul> <p>In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (<a href="https://arxiv.org/abs/1905.10650" rel="nofollow">https://arxiv.org/abs/1905.10650</a>):</p> <ul><li>accessing all the hidden-states of BERT/GPT/GPT-2,</li> <li>accessing all the attention weights for each head of BERT/GPT/GPT-2,</li> <li>retrieving head output values and gradients to be able to compute the head importance score and prune heads as explained in <a href="https://arxiv.org/abs/1905.10650" rel="nofollow">https://arxiv.org/abs/1905.10650</a>.</li></ul> <p>To help you understand and use these features, we have added a specific example script: <a href="https://github.com/huggingface/transformers/tree/master/examples/research_projects/bertology/run_bertology.py" rel="nofollow">bertology.py</a>, which extracts information from and prunes a model pre-trained on GLUE.</p> <script type="module" data-hydrate="1uextls"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1uextls"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/bertology.mdx-1663513a.js") ], params: {} } }); </script>
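<p>A minimal sketch of how these features can be accessed, assuming a standard BERT checkpoint (this is a simplified illustration, not the <code>bertology.py</code> script itself):</p> <pre>
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained(
    "bert-base-uncased",
    output_hidden_states=True,  # return the hidden-states of every layer
    output_attentions=True,     # return the attention weights of every head
)

inputs = tokenizer("BERTology studies the inner workings of BERT.", return_tensors="pt")
outputs = model(**inputs)

hidden_states = outputs.hidden_states  # tuple: embedding output plus one tensor per layer
attentions = outputs.attentions        # tuple: one (batch, heads, seq, seq) tensor per layer

# Prune heads 0 and 2 of layer 0 and head 1 of layer 2 (the indices are illustrative)
model.prune_heads({0: [0, 2], 2: [1]})
</pre>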
96
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/perplexity.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;perplexity-of-fixedlength-models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;calculating-ppl-with-fixedlength-models&quot;,&quot;title&quot;:&quot;Calculating PPL with fixed-length models&quot;},{&quot;local&quot;:&quot;example-calculating-perplexity-with-gpt2-in-transformers&quot;,&quot;title&quot;:&quot;Example: Calculating perplexity with GPT-2 in 🤗 Transformers&quot;}],&quot;title&quot;:&quot;Perplexity of fixed-length models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/perplexity.mdx-36aeb100.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/DocNotebookDropdown-ecff2a90.js"> <h1 class="relative group"><a id="perplexity-of-fixedlength-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#perplexity-of-fixedlength-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Perplexity of fixed-length models </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"><div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>Perplexity (PPL) is one of the most common metrics for evaluating language models. 
Before diving in, we should note that the metric applies specifically to classical language models (sometimes called autoregressive or causal language models) and is not well defined for masked language models like BERT (see <a href="model_summary">summary of the models</a>).</p> <p>Perplexity is defined as the exponentiated average negative log-likelihood of a sequence. If we have a tokenized sequence <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi><mo>=</mo><mo stretchy="false">(</mo><msub><mi>x</mi><mn>0</mn></msub><mo separator="true">,</mo><msub><mi>x</mi><mn>1</mn></msub><mo separator="true">,</mo><mo>…</mo><mo separator="true">,</mo><msub><mi>x</mi><mi>t</mi></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">X = (x_0, x_1, \dots, x_t)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6833em;"></span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3011em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">1</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner">…</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span><!-- HTML_TAG_END -->, then the perplexity of <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math 
xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi></mrow><annotation encoding="application/x-tex">X</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6833em;"></span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span></span></span></span><!-- HTML_TAG_END --> is, <!-- HTML_TAG_START --><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mtext>PPL</mtext><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo><mo>=</mo><mi>exp</mi><mo>⁡</mo><mrow><mo fence="true">{</mo><mrow><mo>−</mo><mfrac><mn>1</mn><mi>t</mi></mfrac><munderover><mo>∑</mo><mi>i</mi><mi>t</mi></munderover><mi>log</mi><mo>⁡</mo><msub><mi>p</mi><mi>θ</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mi mathvariant="normal">∣</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub><mo stretchy="false">)</mo></mrow><mo fence="true">}</mo></mrow></mrow><annotation encoding="application/x-tex">\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{&lt;i}) } \right\}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord text"><span class="mord">PPL</span></span><span class="mopen">(</span><span class="mord mathnormal" style="margin-right:0.07847em;">X</span><span class="mclose">)</span><span class="mspace" style="margin-right:0.2778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2778em;"></span></span><span class="base"><span class="strut" style="height:3.0582em;vertical-align:-1.2777em;"></span><span class="mop">exp</span><span class="mspace" style="margin-right:0.1667em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mord">−</span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.3214em;"><span style="top:-2.314em;"><span class="pstrut" style="height:3em;"></span><span class="mord"><span class="mord mathnormal">t</span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.677em;"><span class="pstrut" style="height:3em;"></span><span class="mord"><span class="mord">1</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.686em;"><span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.7806em;"><span style="top:-1.8723em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span><span style="top:-3.05em;"><span class="pstrut" style="height:3.05em;"></span><span><span class="mop op-symbol large-op">∑</span></span></span><span style="top:-4.3em;margin-left:0em;"><span class="pstrut" style="height:3.05em;"></span><span class="sizing reset-size6 size3 
mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.2777em;"><span></span></span></span></span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.02778em;">θ</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">∣</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span><span class="mclose delimcenter" style="top:0em;"><span class="delimsizing size4">}</span></span></span></span></span></span></span><!-- HTML_TAG_END --></p> <p>where <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>log</mi><mo>⁡</mo><msub><mi>p</mi><mi>θ</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>i</mi></msub><mi mathvariant="normal">∣</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\log p_\theta (x_i|x_{&lt;i})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">lo<span style="margin-right:0.01389em;">g</span></span><span class="mspace" style="margin-right:0.1667em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" 
style="margin-right:0.02778em;">θ</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">i</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">∣</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span><!-- HTML_TAG_END --> is the log-likelihood of the ith token conditioned on the preceding tokens <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>i</mi></mrow></msub></mrow><annotation encoding="application/x-tex">x_{&lt;i}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6079em;vertical-align:-0.1774em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3117em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">i</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span></span></span></span><!-- HTML_TAG_END --> according to our model. Intuitively, it can be thought of as an evaluation of the model’s ability to predict uniformly among the set of specified tokens in a corpus. Importantly, this means that the tokenization procedure has a direct impact on a model’s perplexity which should always be taken into consideration when comparing different models.</p> <p>This is also equivalent to the exponentiation of the cross-entropy between the data and model predictions. 
For more intuition about perplexity and its relationship to Bits Per Character (BPC) and data compression, check out this <a href="https://thegradient.pub/understanding-evaluation-metrics-for-language-models/" rel="nofollow">fantastic blog post on The Gradient</a>.</p> <h2 class="relative group"><a id="calculating-ppl-with-fixedlength-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#calculating-ppl-with-fixedlength-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Calculating PPL with fixed-length models </span></h2> <p>If we weren’t limited by a model’s context size, we would evaluate the model’s perplexity by autoregressively factorizing a sequence and conditioning on the entire preceding subsequence at each step, as shown below.</p> <img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"> <p>When working with approximate models, however, we typically have a constraint on the number of tokens the model can process. 
The largest version of <a href="model_doc/gpt2">GPT-2</a>, for example, has a fixed length of 1024 tokens, so we cannot calculate <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>p</mi><mi>θ</mi></msub><mo stretchy="false">(</mo><msub><mi>x</mi><mi>t</mi></msub><mi mathvariant="normal">∣</mi><msub><mi>x</mi><mrow><mo>&lt;</mo><mi>t</mi></mrow></msub><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">p_\theta(x_t|x_{&lt;t})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord"><span class="mord mathnormal">p</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.3361em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight" style="margin-right:0.02778em;">θ</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mopen">(</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">∣</span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mrel mtight">&lt;</span><span class="mord mathnormal mtight">t</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.1774em;"><span></span></span></span></span></span></span><span class="mclose">)</span></span></span></span><!-- HTML_TAG_END --> directly when <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>t</mi></mrow><annotation encoding="application/x-tex">t</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6151em;"></span><span class="mord mathnormal">t</span></span></span></span><!-- HTML_TAG_END --> is greater than 1024.</p> <p>Instead, the sequence is typically broken into subsequences equal to the model’s maximum input size. 
If a model’s max input size is <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi></mrow><annotation encoding="application/x-tex">k</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.6944em;"></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span></span></span></span><!-- HTML_TAG_END -->, we then approximate the likelihood of a token <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mi>t</mi></msub></mrow><annotation encoding="application/x-tex">x_t</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.5806em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathnormal">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.2806em;"><span style="top:-2.55em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathnormal mtight">t</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span><!-- HTML_TAG_END --> by conditioning only on the <!-- HTML_TAG_START --><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi><mo>−</mo><mn>1</mn></mrow><annotation encoding="application/x-tex">k-1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.7778em;vertical-align:-0.0833em;"></span><span class="mord mathnormal" style="margin-right:0.03148em;">k</span><span class="mspace" style="margin-right:0.2222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222em;"></span></span><span class="base"><span class="strut" style="height:0.6444em;"></span><span class="mord">1</span></span></span></span><!-- HTML_TAG_END --> tokens that precede it rather than the entire context. When evaluating the model’s perplexity of a sequence, a tempting but suboptimal approach is to break the sequence into disjoint chunks and add up the decomposed log-likelihoods of each segment independently.</p> <img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"> <p>This is quick to compute since the perplexity of each segment can be computed in one forward pass, but serves as a poor approximation of the fully-factorized perplexity and will typically yield a higher (worse) PPL because the model will have less context at most of the prediction steps.</p> <p>Instead, the PPL of fixed-length models should be evaluated with a sliding-window strategy. 
This involves repeatedly sliding the context window so that the model has more context when making each prediction.</p> <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"> <p>This is a closer approximation to the true decomposition of the sequence probability and will typically yield a more favorable score. The downside is that it requires a separate forward pass for each token in the corpus. A good practical compromise is to employ a strided sliding window, moving the context by larger strides rather than sliding by 1 token a time. This allows computation to proceed much faster while still giving the model a large context to make predictions at each step.</p> <h2 class="relative group"><a id="example-calculating-perplexity-with-gpt2-in-transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#example-calculating-perplexity-with-gpt2-in-transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Example: Calculating perplexity with GPT-2 in 🤗 Transformers </span></h2> <p>Let’s demonstrate this process with GPT-2.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2LMHeadModel, GPT2TokenizerFast device = <span class="hljs-string">&quot;cuda&quot;</span> model_id = <span class="hljs-string">&quot;gpt2-large&quot;</span> model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = 
GPT2TokenizerFast.from_pretrained(model_id)<!-- HTML_TAG_END --></pre></div> <p>We’ll load in the WikiText-2 dataset and evaluate the perplexity using a few different sliding-window strategies. Since this dataset is small and we’re only doing a single forward pass over the set, we can load and encode the entire dataset in memory.</p>
<pre>from datasets import load_dataset

test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt")</pre>
<p>With 🤗 Transformers, we can simply pass the <code>input_ids</code> as the <code>labels</code> to our model, and the average negative log-likelihood for each token is returned as the loss. With our sliding-window approach, however, there is overlap in the tokens we pass to the model at each iteration. We don’t want the log-likelihood for the tokens we’re treating purely as context to be included in our loss, so we can set these targets to <code>-100</code> so that they are ignored. The following is an example of how we could do this with a stride of <code>512</code>. This means that the model will have at least 512 tokens of context when calculating the conditional likelihood of any one token (provided there are 512 preceding tokens available to condition on).</p>
<pre>import torch
from tqdm import tqdm

max_length = model.config.n_positions
stride = 512

nlls = []
for i in tqdm(range(0, encodings.input_ids.size(1), stride)):
    begin_loc = max(i + stride - max_length, 0)
    end_loc = min(i + stride, encodings.input_ids.size(1))
    trg_len = end_loc - i  # may be different from stride on last loop
    input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
    target_ids = input_ids.clone()
    target_ids[:, :-trg_len] = -100  # context-only tokens are ignored in the loss

    with torch.no_grad():
        outputs = model(input_ids, labels=target_ids)
        neg_log_likelihood = outputs[0] * trg_len

    nlls.append(neg_log_likelihood)

ppl = torch.exp(torch.stack(nlls).sum() / end_loc)</pre>
<p>Running this with the stride length equal to the max input length is equivalent to the suboptimal, non-sliding-window strategy we discussed above. The smaller the stride, the more context the model will have in making each prediction, and the better the reported perplexity will typically be.</p>
<p>When we run the above with <code>stride = 1024</code>, i.e. no overlap, the resulting PPL is <code>19.64</code>, which is about the same as the <code>19.93</code> reported in the GPT-2 paper. By using <code>stride = 512</code> and thereby employing our sliding-window strategy, this jumps down to <code>16.53</code>. This is not only a more favorable score, but is calculated in a way that is closer to the true autoregressive decomposition of a sequence likelihood.</p>
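<p>To reproduce that comparison, we can wrap the loop above in a small helper and call it with different stride values. This is only a sketch and assumes <code>model</code>, <code>encodings</code> and <code>device</code> are defined as above:</p>
<pre>import torch
from tqdm import tqdm


def perplexity_for_stride(stride):
    # same sliding-window evaluation as above, parameterized by the stride
    max_length = model.config.n_positions
    nlls = []
    for i in tqdm(range(0, encodings.input_ids.size(1), stride)):
        begin_loc = max(i + stride - max_length, 0)
        end_loc = min(i + stride, encodings.input_ids.size(1))
        trg_len = end_loc - i  # may be different from stride on last loop
        input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
        target_ids = input_ids.clone()
        target_ids[:, :-trg_len] = -100  # context-only tokens are ignored in the loss

        with torch.no_grad():
            outputs = model(input_ids, labels=target_ids)
            neg_log_likelihood = outputs[0] * trg_len

        nlls.append(neg_log_likelihood)
    return torch.exp(torch.stack(nlls).sum() / end_loc)


for stride in (1024, 512):
    print(f"stride={stride}: ppl={perplexity_for_stride(stride).item():.2f}")</pre>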
97
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/accelerate.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;distributed-training-with-accelerate&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;setup&quot;,&quot;title&quot;:&quot;Setup&quot;},{&quot;local&quot;:&quot;prepare-to-accelerate&quot;,&quot;title&quot;:&quot;Prepare to accelerate&quot;},{&quot;local&quot;:&quot;backward&quot;,&quot;title&quot;:&quot;Backward&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;train-with-a-script&quot;,&quot;title&quot;:&quot;Train with a script&quot;},{&quot;local&quot;:&quot;train-with-a-notebook&quot;,&quot;title&quot;:&quot;Train with a notebook&quot;}],&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Distributed training with 🤗 Accelerate&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/accelerate.mdx-8837c56a.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="distributed-training-with-accelerate" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#distributed-training-with-accelerate"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed training with 🤗 Accelerate </span></h1> <p>As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the <a href="https://huggingface.co/docs/accelerate/index.html" rel="nofollow">🤗 Accelerate</a> library to help users easily train a 🤗 Transformers model on any type of distributed setup, whether it is multiple GPU’s on one machine or multiple GPU’s across several machines. 
In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment.</p> <h2 class="relative group"><a id="setup" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#setup"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Setup </span></h2> <p>Get started by installing 🤗 Accelerate:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install accelerate<!-- HTML_TAG_END --></pre></div> <p>Then import and create an <a href="https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator" rel="nofollow"><code>Accelerator</code></a> object. <code>Accelerator</code> will automatically detect your type of distributed setup and initialize all the necessary components for training. 
You don’t need to explicitly place your model on a device.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-meta">&gt;&gt;&gt; </span>accelerator = Accelerator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="prepare-to-accelerate" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prepare-to-accelerate"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Prepare to accelerate </span></h2> <p>The next step is to pass all the relevant training objects to the <a href="https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare" rel="nofollow"><code>prepare</code></a> method. 
<h2 id="prepare-to-accelerate">Prepare to accelerate</h2>
<p>The next step is to pass all the relevant training objects to the <a href="https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare" rel="nofollow"><code>prepare</code></a> method. This includes your training and evaluation DataLoaders, a model and an optimizer:</p>
<pre>>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
...     train_dataloader, eval_dataloader, model, optimizer
... )</pre>
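<p>The DataLoaders here are plain <code>torch.utils.data.DataLoader</code> objects built from your own preprocessed data. As a rough sketch of where they might come from (this assumes a <code>tokenized_datasets</code> object and a <code>data_collator</code> from an earlier preprocessing step; the model and optimizer themselves are shown in the full example below):</p>
<pre>from torch.utils.data import DataLoader

# `tokenized_datasets` and `data_collator` are assumed to exist from your own preprocessing
train_dataloader = DataLoader(
    tokenized_datasets["train"], shuffle=True, batch_size=8, collate_fn=data_collator
)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], batch_size=8, collate_fn=data_collator
)</pre>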
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> accelerator.backward(loss) <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> progress_bar.update(<span class="hljs-number">1</span>)<!-- HTML_TAG_END --></pre></div> <p>As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-addition">+ from accelerate import Accelerator</span> from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler <span class="hljs-addition">+ accelerator = Accelerator()</span> model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) <span class="hljs-deletion">- device = torch.device(&quot;cuda&quot;) if torch.cuda.is_available() else torch.device(&quot;cpu&quot;)</span> <span class="hljs-deletion">- model.to(device)</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer</span> <span class="hljs-addition">+ )</span> num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = 
get_scheduler( &quot;linear&quot;, optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: <span class="hljs-deletion">- batch = {k: v.to(device) for k, v in batch.items()}</span> outputs = model(**batch) loss = outputs.loss <span class="hljs-deletion">- loss.backward()</span> <span class="hljs-addition">+ accelerator.backward(loss)</span> optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <p>Once you’ve added the relevant lines of code, launch your training in a script or a notebook like Colaboratory.</p> <h3 class="relative group"><a id="train-with-a-script" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train-with-a-script"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train with a script </span></h3> <p>If you are running your training from a script, run the following command to create and save a configuration file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate config<!-- HTML_TAG_END --></pre></div> <p>Then launch your training with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate launch train.py<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="train-with-a-notebook" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train-with-a-notebook"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train with a notebook </span></h3> <p>🤗 Accelerate can also run in a notebook if you’re planning on using Colaboratory’s TPUs. 
Wrap all the code responsible for training in a function, and pass it to <code>notebook_launcher</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> notebook_launcher <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_launcher(training_function)<!-- HTML_TAG_END --></pre></div> <p>For more information about 🤗 Accelerate and it’s rich features, refer to the <a href="https://huggingface.co/docs/accelerate/index.html" rel="nofollow">documentation</a>.</p> <script type="module" data-hydrate="7aq0q2"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="7aq0q2"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/accelerate.mdx-8837c56a.js") ], params: {} } }); </script>
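<p>As a concrete illustration, the <code>training_function</code> passed to <code>notebook_launcher</code> above can simply wrap the training loop shown earlier. The following is only a sketch and assumes <code>checkpoint</code>, <code>train_dataloader</code> and <code>eval_dataloader</code> are already defined in the notebook:</p>
<pre>from accelerate import Accelerator, notebook_launcher
from tqdm.auto import tqdm
from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler


def training_function():
    # everything that touches the model or the dataloaders lives inside the function,
    # so notebook_launcher can run it once per process
    accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
    optimizer = AdamW(model.parameters(), lr=3e-5)
    # eval_dl is prepared here as well so an evaluation loop can use it later
    train_dl, eval_dl, model, optimizer = accelerator.prepare(
        train_dataloader, eval_dataloader, model, optimizer
    )

    num_epochs = 3
    num_training_steps = num_epochs * len(train_dl)
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    progress_bar = tqdm(range(num_training_steps))

    model.train()
    for epoch in range(num_epochs):
        for batch in train_dl:
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            progress_bar.update(1)


notebook_launcher(training_function)</pre>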
98
0
hf_public_repos/doc-build-dev/transformers/pr_16143
hf_public_repos/doc-build-dev/transformers/pr_16143/en/contributing.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;how-to-contribute-to-transformers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;you-can-contribute-in-so-many-ways&quot;,&quot;title&quot;:&quot;You can contribute in so many ways!&quot;},{&quot;local&quot;:&quot;submitting-a-new-issue-or-feature-request&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;did-you-find-a-bug&quot;,&quot;title&quot;:&quot;Did you find a bug?&quot;},{&quot;local&quot;:&quot;do-you-want-to-implement-a-new-model&quot;,&quot;title&quot;:&quot;Do you want to implement a new model?&quot;},{&quot;local&quot;:&quot;do-you-want-a-new-feature-that-is-not-a-model&quot;,&quot;title&quot;:&quot;Do you want a new feature (that is not a model)?&quot;}],&quot;title&quot;:&quot;Submitting a new issue or feature request&quot;},{&quot;local&quot;:&quot;start-contributing-pull-requests&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;checklist&quot;,&quot;title&quot;:&quot;Checklist&quot;},{&quot;local&quot;:&quot;tests&quot;,&quot;title&quot;:&quot;Tests&quot;},{&quot;local&quot;:&quot;style-guide&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;this-guide-was-heavily-inspired-by-the-awesome-scikitlearn-guide-to-contributinghttpsgithubcomscikitlearnscikitlearnblobmastercontributingmd&quot;,&quot;title&quot;:&quot;This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)&quot;}],&quot;title&quot;:&quot;Style guide&quot;},{&quot;local&quot;:&quot;develop-on-windows&quot;,&quot;title&quot;:&quot;Develop on Windows&quot;},{&quot;local&quot;:&quot;syncing-forked-master-with-upstream-huggingface-master&quot;,&quot;title&quot;:&quot;Syncing forked master with upstream (HuggingFace) master&quot;}],&quot;title&quot;:&quot;Start contributing! 
(Pull Requests)&quot;}],&quot;title&quot;:&quot;How to contribute to transformers?&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/contributing.mdx-24741e59.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="how-to-contribute-to-transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-to-contribute-to-transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How to contribute to transformers? </span></h1> <p>Everyone is welcome to contribute, and we value everybody’s contribution. Code is thus not the only way to help the community. 
Answering questions, helping others, reaching out and improving the documentations are immensely valuable to the community.</p> <p>It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say “thank you”.</p> <p>Whichever way you choose to contribute, please be mindful to respect our <a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md" rel="nofollow">code of conduct</a>.</p> <h2 class="relative group"><a id="you-can-contribute-in-so-many-ways" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#you-can-contribute-in-so-many-ways"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>You can contribute in so many ways! </span></h2> <p>There are 4 ways you can contribute to transformers:</p> <ul><li>Fixing outstanding issues with the existing code;</li> <li>Implementing new models;</li> <li>Contributing to the examples or to the documentation;</li> <li>Submitting issues related to bugs or desired new features.</li></ul> <p>In particular there is a special <a href="https://github.com/huggingface/transformers/contribute" rel="nofollow">Good First Issue</a> listing. It will give you a list of open Issues that are open to anybody to work on. Just comment in the issue that you’d like to work on it. In that same listing you will also find some Issues with <code>Good Second Issue</code> label. These are typically slightly more complicated than the Issues with just <code>Good First Issue</code> label. 
But if you feel you know what you’re doing, go for it.</p> <p><em>All are equally valuable to the community.</em></p> <h2 class="relative group"><a id="submitting-a-new-issue-or-feature-request" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#submitting-a-new-issue-or-feature-request"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Submitting a new issue or feature request </span></h2> <p>Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback.</p> <h3 class="relative group"><a id="did-you-find-a-bug" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#did-you-find-a-bug"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Did you find a bug? </span></h3> <p>The 🤗 Transformers library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue.</p> <p>First, we would really appreciate it if you could <strong>make sure the bug was not already reported</strong> (use the search bar on Github under Issues).</p> <p>Did not find it? 
:( So we can act quickly on it, please follow these steps:</p> <ul><li>Include your <strong>OS type and version</strong>, the versions of <strong>Python</strong>, <strong>PyTorch</strong> and <strong>Tensorflow</strong> when applicable;</li> <li>A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s;</li> <li>Provide the <em>full</em> traceback if an exception is raised.</li></ul> <p>To get the OS and software versions automatically, you can run the following command:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->transformers-cli <span class="hljs-built_in">env</span><!-- HTML_TAG_END --></pre></div> <p>or from the root of the repository the following command:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python src/transformers/commands/transformers_cli.py <span class="hljs-built_in">env</span><!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="do-you-want-to-implement-a-new-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#do-you-want-to-implement-a-new-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
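<p>For example, a report for a (purely hypothetical) problem might include a snippet along these lines. The model name and the calls are only placeholders; what matters is that the snippet is self-contained and runs as-is:</p>
<pre>from transformers import AutoModel, AutoTokenizer

# hypothetical reproduction: small, self-contained and runnable on its own
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

inputs = tokenizer("Hello world", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # describe here what you expected to see instead</pre>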
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Do you want to implement a new model? </span></h3> <p>Awesome! Please provide the following information:</p> <ul><li>Short description of the model and link to the paper;</li> <li>Link to the implementation if it is open-source;</li> <li>Link to the model weights if they are available.</li></ul> <p>If you are willing to contribute the model yourself, let us know so we can best guide you.</p> <p>We have added a <strong>detailed guide and templates</strong> to guide you in the process of adding a new model. You can find them in the <a href="https://github.com/huggingface/transformers/tree/master/templates" rel="nofollow"><code>templates</code></a> folder.</p> <h3 class="relative group"><a id="do-you-want-a-new-feature-that-is-not-a-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#do-you-want-a-new-feature-that-is-not-a-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Do you want a new feature (that is not a model)? </span></h3> <p>A world-class feature request addresses the following points:</p> <ol><li>Motivation first:</li></ol> <ul><li>Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.</li> <li>Is it related to something you would need for a project? We’d love to hear about it!</li> <li>Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.</li></ul> <ol start="2"><li>Write a <em>full paragraph</em> describing the feature;</li> <li>Provide a <strong>code snippet</strong> that demonstrates its future use;</li> <li>In case this is related to a paper, please attach a link;</li> <li>Attach any additional information (drawings, screenshots, etc.) 
you think may help.</li></ol> <p>If your issue is well written we’re already 80% of the way there by the time you post it.</p> <p>We have added <strong>templates</strong> to guide you in the process of adding a new example script for training or testing the models in the library. You can find them in the <a href="https://github.com/huggingface/transformers/tree/master/templates" rel="nofollow"><code>templates</code></a> folder.</p> <h2 class="relative group"><a id="start-contributing-pull-requests" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#start-contributing-pull-requests"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Start contributing! (Pull Requests) </span></h2> <p>Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.</p> <p>You will need basic <code>git</code> proficiency to be able to contribute to 🤗 Transformers. <code>git</code> is not the easiest tool to use but it has the greatest manual. Type <code>git --help</code> in a shell and enjoy. If you prefer books, <a href="https://git-scm.com/book/en/v2" rel="nofollow">Pro Git</a> is a very good reference.</p> <p>Follow these steps to start contributing:</p> <ol><li><p>Fork the <a href="https://github.com/huggingface/transformers" rel="nofollow">repository</a> by clicking on the ‘Fork’ button on the repository’s page. 
This creates a copy of the code under your GitHub user account.</p></li> <li><p>Clone your fork to your local disk, and add the base repository as a remote:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ git <span class="hljs-built_in">clone</span> [email protected]:&lt;your Github handle&gt;/transformers.git $ <span class="hljs-built_in">cd</span> transformers $ git remote add upstream https://github.com/huggingface/transformers.git<!-- HTML_TAG_END --></pre></div></li> <li><p>Create a new branch to hold your development changes:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ git checkout -b a-descriptive-name-for-my-changes<!-- HTML_TAG_END --></pre></div> <p><strong>Do not</strong> work on the <code>master</code> branch.</p></li> <li><p>Set up a development environment by running the following command in a virtual environment:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ pip install -e <span class="hljs-string">&quot;.[dev]&quot;</span><!-- HTML_TAG_END --></pre></div> <p>(If transformers was already installed in the virtual environment, remove it with <code>pip uninstall transformers</code> before reinstalling it in editable mode with the <code>-e</code> flag.)</p> <p>To run the full test suite, you might need the additional dependency on <code>datasets</code> which requires a separate source install:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ git <span class="hljs-built_in">clone</span> https://github.com/huggingface/datasets $ <span class="hljs-built_in">cd</span> datasets $ pip install -e .<!-- HTML_TAG_END --></pre></div> <p>If you have already cloned that repo, you might need to <code>git pull</code> to get the most recent changes in the <code>datasets</code> library.</p></li> <li><p>Develop the features on your branch.</p> <p>As you work on the features, you should make sure that the test suite passes. 
You should run the tests impacted by your changes like this:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ pytest tests/&lt;TEST_TO_RUN&gt;.py<!-- HTML_TAG_END --></pre></div> <p>You can also run the full suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Transformers has grown a lot. Here is the command for it:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ make <span class="hljs-built_in">test</span><!-- HTML_TAG_END --></pre></div> <p>For more information about tests, check out the <a href="https://huggingface.co/docs/transformers/testing" rel="nofollow">dedicated documentation</a></p> <p>🤗 Transformers relies on <code>black</code> and <code>isort</code> to format its source code consistently. 
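<p>For example, to run the tests for a single model file, or only the tests whose names match a keyword, you can use pytest’s standard <code>-k</code> filter (the file name below is just an illustration):</p>
<pre>$ pytest tests/test_modeling_bert.py
$ pytest tests/test_modeling_bert.py -k "attention" -v</pre>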
After you make changes, apply automatic style corrections and code verifications that can’t be automated in one go with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ make fixup<!-- HTML_TAG_END --></pre></div> <p>This target is also optimized to only work with files modified by the PR you’re working on.</p> <p>If you prefer to run the checks one after the other, the following command apply the style corrections:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ make style<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers also uses <code>flake8</code> and a few custom scripts to check for coding mistakes. 
🤗 Transformers also uses `flake8` and a few custom scripts to check for coding mistakes. Quality control runs in CI, but you can also run the same checks locally with:

```bash
$ make quality
```
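If you only want a quick lint of a file you just touched rather than the whole repository, `flake8` can also be invoked on it directly; the path below is illustrative:

```bash
# Lint a single file rather than the whole repository (path is illustrative)
$ flake8 src/transformers/models/marian/modeling_marian.py
```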
Finally, we have a lot of scripts that check we didn't forget to update some files when adding a new model. You can run them with:

```bash
$ make repo-consistency
```

To learn more about those checks and how to fix any issue with them, check out the [documentation](https://huggingface.co/docs/transformers/pr_checks).

If you're modifying documents under `docs/source`, make sure to validate that they can still be built. This check also runs in CI. To run a local check, make sure you have installed the documentation builder requirements. First you will need to install, straight from its repository, the package containing our tools to build the documentation:

```bash
$ pip install git+https://github.com/huggingface/doc-builder
```

Then, make sure you have all the dependencies needed to build the docs with:

```bash
$ pip install ".[docs]"
```

Finally, run the following command from the root of the repository:
fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build<!-- HTML_TAG_END --></pre></div> <p>This will build the documentation in the <code>~/tmp/test-build</code> folder where you can inspect the generated Markdown files with your favorite editor. You won’t be able to see the final rendering on the website before your PR is merged, we are actively working on adding a tool for this.</p> <p>Once you’re happy with your changes, add changed files using <code>git add</code> and make a commit with <code>git commit</code> to record your changes locally:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ git add modified_file.py $ git commit<!-- HTML_TAG_END --></pre></div> <p>Please write <a href="https://chris.beams.io/posts/git-commit/" rel="nofollow">good commit messages</a>.</p> <p>It is a good idea to sync your copy of the code with the original repository regularly. 
It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

```bash
$ git fetch upstream
$ git rebase upstream/master
```

Push the changes to your account using:

```bash
$ git push -u origin a-descriptive-name-for-my-changes
```
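These commands assume your clone has an `upstream` remote pointing at the main repository and `origin` pointing at your fork; if you did not add it when setting up your fork, you can do so once with:

```bash
$ git remote add upstream https://github.com/huggingface/transformers.git
$ git remote -v  # check that both origin (your fork) and upstream are listed
```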
Once you are satisfied (**and the checklist below is happy too**), go to the webpage of your fork on GitHub. Click on "Pull request" to send your changes to the project maintainers for review.

It's ok if maintainers ask you for changes. It happens to core contributors too! So that everyone can see the changes in the pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.

### Checklist

1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;
4. Make sure existing tests pass;
5. Add high-coverage tests. No quality testing = no merge.
   - If you are adding a new model, make sure that you use `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)`, which triggers the common tests.
   - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`.
   - If you are adding a new tokenizer, write tests and make sure `RUN_SLOW=1 python -m pytest tests/test_tokenization_{your_model_name}.py` passes. CircleCI does not run the slow tests, but GitHub Actions does every night!
6. All public methods must have informative docstrings that work nicely with sphinx. See `modeling_bert.py` for an example.
7. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If your PR is an external contribution, feel free to add the images to it and ask a Hugging Face member to migrate them to this dataset.

See more about the checks run on a pull request in our [PR guide](pr_checks).

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples).

We like `pytest` and `pytest-xdist` because they make the test runs faster. From the root of the repository, here's how to run tests with `pytest` for the library:

```bash
$ python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

and for the examples:

```bash
$ pip install -r examples/xxx/requirements.txt  # only needed the first time
$ python -m pytest -n auto --dist=loadfile -s -v ./examples/
```

In fact, that's how `make test` and `make test-examples` are implemented (sans the `pip install` line)!

You can specify a smaller set of tests in order to test only the feature you're working on.
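For instance, `pytest` lets you narrow a run down to a single file, a single test class or method, or any test whose name matches a keyword expression. The file, class and method names below are illustrative:

```bash
# A single test file
$ python -m pytest tests/test_modeling_marian.py -v

# A single test class or test method inside it (names are hypothetical)
$ python -m pytest tests/test_modeling_marian.py::MarianModelTest -v
$ python -m pytest tests/test_modeling_marian.py::MarianModelTest::test_forward -v

# Every test whose name matches a keyword expression
$ python -m pytest tests/ -k "marian" -v
```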
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ pip install -r examples/xxx/requirements.txt <span class="hljs-comment"># only needed the first time</span> $ python -m pytest -n auto --dist=loadfile -s -v ./examples/<!-- HTML_TAG_END --></pre></div> <p>In fact, that’s how <code>make test</code> and <code>make test-examples</code> are implemented (sans the <code>pip install</code> line)!</p> <p>You can specify a smaller set of tests in order to test only the feature you’re working on.</p> <p>By default, slow tests are skipped. Set the <code>RUN_SLOW</code> environment variable to <code>yes</code> to run them. This will download many gigabytes of models — make sure you have enough disk space and a good Internet connection, or a lot of patience!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ RUN_SLOW=<span class="hljs-built_in">yes</span> python -m pytest -n auto --dist=loadfile -s -v ./tests/ $ RUN_SLOW=<span class="hljs-built_in">yes</span> python -m pytest -n auto --dist=loadfile -s -v ./examples/<!-- HTML_TAG_END --></pre></div> <p>Likewise, set the <code>RUN_CUSTOM_TOKENIZERS</code> environment variable to <code>yes</code> to run tests for custom tokenizers, which don’t run by default either.</p> <p>🤗 Transformers uses <code>pytest</code> as a test runner only. It doesn’t use any <code>pytest</code>-specific features in the test suite itself.</p> <p>This means <code>unittest</code> is fully supported. 
🤗 Transformers uses `pytest` as a test runner only. It doesn't use any `pytest`-specific features in the test suite itself.

This means `unittest` is fully supported. Here's how to run tests with `unittest`:

```bash
$ python -m unittest discover -s tests -t . -v
$ python -m unittest discover -s examples -t examples -v
```
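If you prefer `unittest` but only want a single module, you can address it by its dotted path instead of using discovery. The module name is illustrative and this assumes you run the command from the repository root:

```bash
$ python -m unittest tests.test_modeling_marian -v
```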
### Style guide

For documentation strings, 🤗 Transformers follows the [Google style](https://google.github.io/styleguide/pyguide.html). Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/master/docs#writing-documentation---specification) for more information.

#### This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)

### Develop on Windows

On Windows, you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings: `git config core.autocrlf input`

One way to run the `make` command on Windows is through MSYS2:

1. [Download MSYS2](https://www.msys2.org/); we assume it is installed in `C:\msys64`.
2. Open the command line `C:\msys64\msys2.exe` (it should be available from the start menu).
3. Run in the shell: `pacman -Syu`, and install `make` with `pacman -S make`.
4. Add `C:\msys64\usr\bin` to your PATH environment variable.

You can now use `make` from any terminal (PowerShell, cmd.exe, etc.) 🎉
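Once MSYS2's `usr\bin` is on your PATH, you can sanity-check the setup from a fresh terminal; any recent GNU make should respond:

```bash
$ make --version
```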
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#syncing-forked-master-with-upstream-huggingface-master"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Syncing forked master with upstream (HuggingFace) master </span></h3> <p>To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs, when syncing the master branch of a forked repository, please, follow these steps:</p> <ol><li>When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead merge directly into the forked master.</li> <li>If a PR is absolutely necessary, use the following steps after checking out your branch:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-symbol">$</span> git checkout -b your-branch-<span class="hljs-keyword">for</span>-syncing <span class="hljs-symbol">$</span> git pull --squash --<span class="hljs-keyword">no</span>-commit upstream master <span class="hljs-symbol">$</span> git commit -m <span class="hljs-string">&#x27;&lt;your message without GitHub references&gt;&#x27;</span> <span class="hljs-symbol">$</span> git push --<span class="hljs-keyword">set</span>-upstream <span class="hljs-comment">origin your-branch-for-syncing</span><!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="1diqqj9"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1diqqj9"]').parentNode, 
paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/contributing.mdx-24741e59.js") ], params: {} } }); </script>
99